Mirror of https://github.com/fafhrd91/actix-web (synced 2025-07-03 01:34:32 +02:00)

Compare commits: files-v0.6 ... multipart- (284 commits)
SHA1 | Author | Date | |
---|---|---|---|
d77bcb0b7c | |||
c4db9a1ae2 | |||
740d0c0c9d | |||
f27584046c | |||
129b78f9c7 | |||
ad27150c5f | |||
8d5d6a2598 | |||
e97329eb2a | |||
fbfff3e751 | |||
fdfb3d45db | |||
4e05629368 | |||
e35ec28cd2 | |||
35006e9cae | |||
115701eb86 | |||
e2fed91efd | |||
d4b833ccf0 | |||
358c1cf85b | |||
42193bee29 | |||
dc08ea044b | |||
85d88ffada | |||
bf19a0e761 | |||
bf1f169be2 | |||
359d5d5c80 | |||
65c0545a7a | |||
b933ed4456 | |||
4bff1d0abe | |||
fa106da555 | |||
c15016dafb | |||
74688843ba | |||
845156da85 | |||
98752c053c | |||
df6fde883c | |||
8d4cb8c69a | |||
dd9ac4d9b8 | |||
72c80f9107 | |||
b00fe72cf6 | |||
2f0b8a264a | |||
b9f0faafde | |||
6627109984 | |||
b9f54c8796 | |||
cfd40b4f15 | |||
08c2cdf641 | |||
fbd0e5dd0a | |||
7b936bc443 | |||
d2364c80c4 | |||
77459ec415 | |||
6f0a6bd1bb | |||
06c3513bc0 | |||
29bd6a1dd5 | |||
17f7cd2aae | |||
ede645ee4e | |||
6d48593a60 | |||
3c69d078b2 | |||
e7c34f2e45 | |||
d708a4de6d | |||
d97bd7ec17 | |||
fcd06c9896 | |||
1065043528 | |||
45b77c6819 | |||
a2e2c30d59 | |||
83cd061c86 | |||
068909f1b3 | |||
f8cb71e789 | |||
73b94e902d | |||
ad7e67f940 | |||
1519ae7772 | |||
cc7145d41d | |||
172c4c7a0a | |||
fd63305859 | |||
ef64d6a27c | |||
4d3689db5e | |||
894effb856 | |||
07a7290432 | |||
bd5c0af0a6 | |||
c73fba16ce | |||
909461087c | |||
40f7ab38d2 | |||
a9e44bcf07 | |||
7767cf3071 | |||
b59a96d9d7 | |||
037740bf62 | |||
386258c285 | |||
99bf774e94 | |||
35b0fd1a85 | |||
0b5b4dcbf3 | |||
c993055fc8 | |||
679f61cf37 | |||
056de320f0 | |||
f220719fae | |||
c9f91796df | |||
ea764b1d57 | |||
19aa14a9d6 | |||
10746fb2fb | |||
4bbe60b609 | |||
8ff489aa90 | |||
e0a88cea8d | |||
d78ff283af | |||
ce6d520215 | |||
3e25742a41 | |||
20f4cfe6b5 | |||
6408291ab0 | |||
8d260e599f | |||
14bcf72ec1 | |||
6485434a33 | |||
16c7c16463 | |||
9b0fdca6e9 | |||
8759d79b03 | |||
c0d5d7bdb5 | |||
40eab1f091 | |||
75517cce82 | |||
9b51624b27 | |||
8e2ae8cd40 | |||
9a2f8450e0 | |||
23ef51609e | |||
f7d629a61a | |||
e0845d9ad9 | |||
2f79daec16 | |||
f3f41a0cc7 | |||
987067698b | |||
b62f1b4ef7 | |||
df5257c373 | |||
226ea696ce | |||
e524fc86ea | |||
7e990e423f | |||
8f9a12ed5d | |||
c6eba2da9b | |||
06c7945801 | |||
0dba6310c6 | |||
f7d7d92984 | |||
3d6ea7fe9b | |||
8dbf7da89f | |||
de92b3be2e | |||
5d0e8138ee | |||
6b7196225e | |||
265fa0d050 | |||
062127a210 | |||
3926416580 | |||
43671ae4aa | |||
264a703d94 | |||
498fb954b3 | |||
2253eae2bb | |||
8e76a1c775 | |||
dce57a79c9 | |||
6a5b370206 | |||
b1c85ba85b | |||
9aab911600 | |||
017e40f733 | |||
45592b37b6 | |||
8abcb94512 | |||
f2cacc4c9d | |||
56b9c0d08e | |||
de9e41484a | |||
2fed978597 | |||
40048a5811 | |||
e942d3e3b1 | |||
09cffc093c | |||
c58f287044 | |||
7b27493e4c | |||
478b33b8a3 | |||
592b40f914 | |||
fe5279c77a | |||
80d222aa78 | |||
a03a2a0076 | |||
745e738955 | |||
1fd90f0b10 | |||
a35804b89f | |||
5611b98c0d | |||
dce9438518 | |||
be986d96b3 | |||
8ddb24b49b | |||
87f627cd5d | |||
03456b8a33 | |||
8c2fad3164 | |||
62fbd225bc | |||
0fa4d999d9 | |||
da4c849f62 | |||
49cd303c3b | |||
955c3ac0c4 | |||
56e5c19b85 | |||
3f03af1c59 | |||
25c0673278 | |||
e7a05f9892 | |||
2f13e5f675 | |||
9f964751f6 | |||
fcca515387 | |||
075932d823 | |||
cb379c0e0c | |||
d4a5d450de | |||
542200cbc2 | |||
d0c08dbb7d | |||
d0b5fb18d2 | |||
12fb3412a5 | |||
2665357a0c | |||
693271e571 | |||
10ef9b0751 | |||
ce00c88963 | |||
75e6ffb057 | |||
ad38973767 | |||
1c1d6477ef | |||
53509a5361 | |||
a6f27baff1 | |||
218e34ee17 | |||
11bfa84926 | |||
5aa6f713c7 | |||
151a15da74 | |||
1ce58ecb30 | |||
f940653981 | |||
b291e29882 | |||
f843776f36 | |||
52f7d96358 | |||
51e573b888 | |||
38e015432b | |||
f5895d5eff | |||
a0c4bf8d1b | |||
594e3a6ef1 | |||
a808a26d8c | |||
de62e8b025 | |||
3486edabcf | |||
4c59a34513 | |||
1b706b3069 | |||
a9f445875a | |||
e0f02c1d9e | |||
092dbba5b9 | |||
ff4b2d251f | |||
98faa61afe | |||
3f2db9e75c | |||
074d18209d | |||
593fbde46a | |||
161861997c | |||
3d621677a5 | |||
0c144054cb | |||
b0fbe0dfd8 | |||
b653bf557f | |||
1d1a65282f | |||
b0a363a7ae | |||
b4d3c2394d | |||
5ca42df89a | |||
fc5ecdc30b | |||
7fe800c3ff | |||
075df88a07 | |||
391d8a744a | |||
5b6cb681b9 | |||
0957ec40b4 | |||
ccf430d74a | |||
c84c1f0f15 | |||
e9279dfbb8 | |||
a68239adaa | |||
40a4b1ccd5 | |||
7f5a8c0851 | |||
bcdde1d4ea | |||
30aa64ea32 | |||
5469b02638 | |||
a66cd38ec5 | |||
20609e93fd | |||
bf282472ab | |||
7f4b44c258 | |||
66243717b3 | |||
102720d398 | |||
c3c7eb8df9 | |||
21f57caf4a | |||
47f5faf26e | |||
9777653dc0 | |||
9fde5b30db | |||
fd412a8223 | |||
cd511affd5 | |||
3200de3f34 | |||
b3e84b5c4b | |||
a3416112a5 | |||
21a08ca796 | |||
a9f497d05f | |||
cc9ba162f7 | |||
37799df978 | |||
0d93a8c273 | |||
3ae4f0a629 | |||
14a4f325d3 | |||
1bd2076b35 | |||
5454699bab | |||
d7c5c966d2 | |||
50894e392e | |||
008753f07a | |||
c92aa31f91 | |||
c25dd23820 | |||
acacb90b2e | |||
8459f566a8 |
@ -6,9 +6,12 @@ lint-all = "clippy --workspace --all-features --tests --examples --bins -- -Dcli
ci-check-min = "hack --workspace check --no-default-features"
ci-check-default = "hack --workspace check"
ci-check-default-tests = "check --workspace --tests"
ci-check-all-feature-powerset="hack --workspace --feature-powerset --skip=__compress,io-uring check"
ci-check-all-feature-powerset="hack --workspace --feature-powerset --skip=__compress,experimental-io-uring check"
ci-check-all-feature-powerset-linux="hack --workspace --feature-powerset --skip=__compress check"

# testing
ci-doctest-default = "test --workspace --doc --no-fail-fast -- --nocapture"
ci-doctest = "test --workspace --all-features --doc --no-fail-fast -- --nocapture"

# compile docs as docs.rs would
# RUSTDOCFLAGS="--cfg=docsrs" cargo +nightly doc --no-deps --workspace
10
.github/ISSUE_TEMPLATE/bug_report.md
vendored
@ -3,34 +3,40 @@ name: Bug Report
|
||||
about: Create a bug report.
|
||||
---
|
||||
|
||||
Your issue may already be reported!
|
||||
Please search on the [Actix Web issue tracker](https://github.com/actix/actix-web/issues) before creating one.
|
||||
Your issue may already be reported! Please search on the [Actix Web issue tracker](https://github.com/actix/actix-web/issues) before creating one.
|
||||
|
||||
## Expected Behavior
|
||||
|
||||
<!--- If you're describing a bug, tell us what should happen -->
|
||||
<!--- If you're suggesting a change/improvement, tell us how it should work -->
|
||||
|
||||
## Current Behavior
|
||||
|
||||
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
|
||||
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
|
||||
|
||||
## Possible Solution
|
||||
|
||||
<!--- Not obligatory, but suggest a fix/reason for the bug, -->
|
||||
<!--- or ideas how to implement the addition or change -->
|
||||
|
||||
## Steps to Reproduce (for bugs)
|
||||
|
||||
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
|
||||
<!--- reproduce this bug. Include code to reproduce, if relevant -->
|
||||
|
||||
1.
|
||||
2.
|
||||
3.
|
||||
4.
|
||||
|
||||
## Context
|
||||
|
||||
<!--- How has this issue affected you? What are you trying to accomplish? -->
|
||||
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
|
||||
|
||||
## Your Environment
|
||||
|
||||
<!--- Include as many relevant details about the environment you experienced the bug in -->
|
||||
|
||||
- Rust Version (I.e, output of `rustc -V`):
|
||||
|
7
.github/PULL_REQUEST_TEMPLATE.md
vendored
@ -2,12 +2,14 @@
|
||||
<!-- Please fill out the following to get your PR reviewed quicker. -->
|
||||
|
||||
## PR Type
|
||||
|
||||
<!-- What kind of change does this PR make? -->
|
||||
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->
|
||||
|
||||
PR_TYPE
|
||||
|
||||
|
||||
## PR Checklist
|
||||
|
||||
<!-- Check your PR fulfills the following items. -->
|
||||
<!-- For draft PRs check the boxes as you complete them. -->
|
||||
|
||||
@ -17,11 +19,10 @@ PR_TYPE
|
||||
- [ ] Format code with the latest stable rustfmt.
|
||||
- [ ] (Team) Label with affected crates and semver status.
|
||||
|
||||
|
||||
## Overview
|
||||
|
||||
<!-- Describe the current and new behavior. -->
|
||||
<!-- Emphasize any breaking changes. -->
|
||||
|
||||
|
||||
<!-- If this PR fixes or closes an issue, reference it here. -->
|
||||
<!-- Closes #000 -->
|
||||
|
3
.github/workflows/bench.yml
vendored
@ -5,6 +5,9 @@ on:
|
||||
branches:
|
||||
- master
|
||||
|
||||
permissions:
|
||||
contents: read # to fetch code (actions/checkout)
|
||||
|
||||
jobs:
|
||||
check_benchmark:
|
||||
runs-on: ubuntu-latest
|
||||
|
@ -1,9 +1,12 @@
|
||||
name: CI (master only)
|
||||
name: CI (post-merge)
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master]
|
||||
|
||||
permissions:
|
||||
contents: read # to fetch code (actions/checkout)
|
||||
|
||||
jobs:
|
||||
build_and_test_nightly:
|
||||
strategy:
|
||||
@ -23,6 +26,7 @@ jobs:
|
||||
CI: 1
|
||||
CARGO_INCREMENTAL: 0
|
||||
VCPKGRS_DYNAMIC: 1
|
||||
CARGO_UNSTABLE_SPARSE_REGISTRY: true
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
@ -44,18 +48,15 @@ jobs:
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: Install cargo-hack
|
||||
uses: taiki-e/install-action@cargo-hack
|
||||
|
||||
- name: Generate Cargo.lock
|
||||
uses: actions-rs/cargo@v1
|
||||
with: { command: generate-lockfile }
|
||||
- name: Cache Dependencies
|
||||
uses: Swatinem/rust-cache@v1.2.0
|
||||
|
||||
- name: Install cargo-hack
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: install
|
||||
args: cargo-hack
|
||||
|
||||
- name: check minimal
|
||||
uses: actions-rs/cargo@v1
|
||||
with: { command: ci-check-min }
|
||||
@ -78,76 +79,58 @@ jobs:
|
||||
cargo test --lib --tests -p=actix-multipart --all-features
|
||||
cargo test --lib --tests -p=actix-web-actors --all-features
|
||||
|
||||
- name: tests (io-uring)
|
||||
if: matrix.target.os == 'ubuntu-latest'
|
||||
timeout-minutes: 60
|
||||
run: >
|
||||
sudo bash -c "ulimit -Sl 512
|
||||
&& ulimit -Hl 512
|
||||
&& PATH=$PATH:/usr/share/rust/.cargo/bin
|
||||
&& RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo test --lib --tests -p=actix-files --all-features"
|
||||
|
||||
- name: Clear the cargo caches
|
||||
run: |
|
||||
cargo install cargo-cache --version 0.6.3 --no-default-features --features ci-autoclean
|
||||
cargo install cargo-cache --version 0.8.2 --no-default-features --features ci-autoclean
|
||||
cargo-cache
|
||||
|
||||
ci_feature_powerset_check:
|
||||
name: Verify Feature Combinations
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
env:
|
||||
CI: 1
|
||||
CARGO_INCREMENTAL: 0
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install stable
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable-x86_64-unknown-linux-gnu
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: Generate Cargo.lock
|
||||
uses: actions-rs/cargo@v1
|
||||
with: { command: generate-lockfile }
|
||||
- name: Cache Dependencies
|
||||
uses: Swatinem/rust-cache@v1.2.0
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Install cargo-hack
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: install
|
||||
args: cargo-hack
|
||||
|
||||
- name: check feature combinations
|
||||
uses: actions-rs/cargo@v1
|
||||
with: { command: ci-check-all-feature-powerset }
|
||||
|
||||
- name: check feature combinations
|
||||
uses: actions-rs/cargo@v1
|
||||
with: { command: ci-check-all-feature-powerset-linux }
|
||||
|
||||
coverage:
|
||||
name: coverage
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install stable
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable-x86_64-unknown-linux-gnu
|
||||
profile: minimal
|
||||
override: true
|
||||
uses: taiki-e/install-action@cargo-hack
|
||||
|
||||
- name: Generate Cargo.lock
|
||||
uses: actions-rs/cargo@v1
|
||||
with: { command: generate-lockfile }
|
||||
run: cargo generate-lockfile
|
||||
- name: Cache Dependencies
|
||||
uses: Swatinem/rust-cache@v1.2.0
|
||||
|
||||
- name: Generate coverage file
|
||||
run: |
|
||||
cargo install cargo-tarpaulin --vers "^0.13"
|
||||
cargo tarpaulin --workspace --features=rustls,openssl --out Xml --verbose
|
||||
- name: Upload to Codecov
|
||||
uses: codecov/codecov-action@v1
|
||||
with: { file: cobertura.xml }
|
||||
- name: check feature combinations
|
||||
run: cargo ci-check-all-feature-powerset
|
||||
|
||||
- name: check feature combinations
|
||||
run: cargo ci-check-all-feature-powerset-linux
|
||||
|
||||
nextest:
|
||||
name: nextest
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
env:
|
||||
CI: 1
|
||||
CARGO_INCREMENTAL: 0
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Install nextest
|
||||
uses: taiki-e/install-action@nextest
|
||||
|
||||
- name: Generate Cargo.lock
|
||||
run: cargo generate-lockfile
|
||||
- name: Cache Dependencies
|
||||
uses: Swatinem/rust-cache@v1.3.0
|
||||
|
||||
- name: Test with cargo-nextest
|
||||
run: cargo nextest run
|
63
.github/workflows/ci.yml
vendored
@ -6,6 +6,9 @@ on:
|
||||
push:
|
||||
branches: [master]
|
||||
|
||||
permissions:
|
||||
contents: read # to fetch code (actions/checkout)
|
||||
|
||||
jobs:
|
||||
build_and_test:
|
||||
strategy:
|
||||
@ -16,7 +19,7 @@ jobs:
|
||||
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
|
||||
- { name: Windows, os: windows-2022, triple: x86_64-pc-windows-msvc }
|
||||
version:
|
||||
- 1.54.0 # MSRV
|
||||
- 1.59.0 # MSRV
|
||||
- stable
|
||||
|
||||
name: ${{ matrix.target.name }} / ${{ matrix.version }}
|
||||
@ -47,17 +50,26 @@ jobs:
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: Install cargo-hack
|
||||
uses: taiki-e/install-action@cargo-hack
|
||||
|
||||
- name: workaround MSRV issues
|
||||
if: matrix.version != 'stable'
|
||||
run: |
|
||||
cargo install cargo-edit --version=0.8.0
|
||||
cargo add const-str@0.3 --dev -p=actix-web
|
||||
cargo add const-str@0.3 --dev -p=awc
|
||||
|
||||
- name: Generate Cargo.lock
|
||||
uses: actions-rs/cargo@v1
|
||||
with: { command: generate-lockfile }
|
||||
- name: Cache Dependencies
|
||||
uses: Swatinem/rust-cache@v1.2.0
|
||||
|
||||
- name: Install cargo-hack
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: install
|
||||
args: cargo-hack
|
||||
- name: workaround MSRV issues
|
||||
if: matrix.version != 'stable'
|
||||
run: |
|
||||
cargo update -p=zstd-sys --precise=2.0.1+zstd.1.5.2
|
||||
|
||||
- name: check minimal
|
||||
uses: actions-rs/cargo@v1
|
||||
@ -81,19 +93,31 @@ jobs:
|
||||
cargo test --lib --tests -p=actix-multipart --all-features
|
||||
cargo test --lib --tests -p=actix-web-actors --all-features
|
||||
|
||||
- name: Clear the cargo caches
|
||||
run: |
|
||||
cargo install cargo-cache --version 0.8.2 --no-default-features --features ci-autoclean
|
||||
cargo-cache
|
||||
|
||||
io-uring:
|
||||
name: io-uring tests
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
- name: Generate Cargo.lock
|
||||
run: cargo generate-lockfile
|
||||
- name: Cache Dependencies
|
||||
uses: Swatinem/rust-cache@v1.3.0
|
||||
|
||||
- name: tests (io-uring)
|
||||
if: matrix.target.os == 'ubuntu-latest'
|
||||
timeout-minutes: 60
|
||||
run: >
|
||||
sudo bash -c "ulimit -Sl 512
|
||||
&& ulimit -Hl 512
|
||||
&& PATH=$PATH:/usr/share/rust/.cargo/bin
|
||||
&& RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo test --lib --tests -p=actix-files --all-features"
|
||||
|
||||
- name: Clear the cargo caches
|
||||
run: |
|
||||
cargo install cargo-cache --version 0.6.3 --no-default-features --features ci-autoclean
|
||||
cargo-cache
|
||||
&& RUSTUP_TOOLCHAIN=stable cargo test --lib --tests -p=actix-files --all-features"
|
||||
|
||||
rustdoc:
|
||||
name: doc tests
|
||||
@ -101,20 +125,13 @@ jobs:
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install Rust (nightly)
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: nightly-x86_64-unknown-linux-gnu
|
||||
profile: minimal
|
||||
override: true
|
||||
- uses: dtolnay/rust-toolchain@nightly
|
||||
|
||||
- name: Generate Cargo.lock
|
||||
uses: actions-rs/cargo@v1
|
||||
with: { command: generate-lockfile }
|
||||
run: cargo generate-lockfile
|
||||
- name: Cache Dependencies
|
||||
uses: Swatinem/rust-cache@v1.3.0
|
||||
|
||||
- name: doc tests
|
||||
uses: actions-rs/cargo@v1
|
||||
run: cargo ci-doctest
|
||||
timeout-minutes: 60
|
||||
with: { command: ci-doctest }
|
||||
|
47
.github/workflows/clippy-fmt.yml
vendored
@ -9,40 +9,41 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
profile: minimal
|
||||
components: rustfmt
|
||||
- name: Check with rustfmt
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: fmt
|
||||
args: --all -- --check
|
||||
- uses: dtolnay/rust-toolchain@nightly
|
||||
with: { components: rustfmt }
|
||||
- run: cargo fmt --all -- --check
|
||||
|
||||
clippy:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable
|
||||
profile: minimal
|
||||
components: clippy
|
||||
override: true
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with: { components: clippy }
|
||||
|
||||
- name: Generate Cargo.lock
|
||||
uses: actions-rs/cargo@v1
|
||||
with: { command: generate-lockfile }
|
||||
run: cargo generate-lockfile
|
||||
- name: Cache Dependencies
|
||||
uses: Swatinem/rust-cache@v1.2.0
|
||||
|
||||
|
||||
- name: Check with Clippy
|
||||
uses: actions-rs/clippy-check@v1
|
||||
with:
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
args: --workspace --tests --examples --all-features
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
lint-docs:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
with: { components: rust-docs }
|
||||
|
||||
- name: Check for broken intra-doc links
|
||||
uses: actions-rs/cargo@v1
|
||||
env:
|
||||
RUSTDOCFLAGS: "-D warnings"
|
||||
with:
|
||||
command: doc
|
||||
args: --no-deps --all-features --workspace
|
||||
|
36
.github/workflows/coverage.yml
vendored
Normal file
@ -0,0 +1,36 @@
|
||||
# disabled because `cargo tarpaulin` currently segfaults
|
||||
|
||||
name: Coverage
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [master]
|
||||
|
||||
jobs:
|
||||
# job currently (1st Feb 2022) segfaults
|
||||
coverage:
|
||||
name: coverage
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install stable
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: stable-x86_64-unknown-linux-gnu
|
||||
profile: minimal
|
||||
override: true
|
||||
|
||||
- name: Generate Cargo.lock
|
||||
uses: actions-rs/cargo@v1
|
||||
with: { command: generate-lockfile }
|
||||
- name: Cache Dependencies
|
||||
uses: Swatinem/rust-cache@v1.2.0
|
||||
|
||||
- name: Generate coverage file
|
||||
run: |
|
||||
cargo install cargo-tarpaulin --vers "^0.13"
|
||||
cargo tarpaulin --workspace --features=rustls,openssl --out Xml --verbose
|
||||
- name: Upload to Codecov
|
||||
uses: codecov/codecov-action@v1
|
||||
with: { file: cobertura.xml }
|
25
.github/workflows/upload-doc.yml
vendored
@ -4,32 +4,29 @@ on:
|
||||
push:
|
||||
branches: [master]
|
||||
|
||||
permissions: {}
|
||||
jobs:
|
||||
build:
|
||||
permissions:
|
||||
contents: write # to push changes in repo (jamesives/github-pages-deploy-action)
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
|
||||
- name: Install Rust
|
||||
uses: actions-rs/toolchain@v1
|
||||
with:
|
||||
toolchain: nightly-x86_64-unknown-linux-gnu
|
||||
profile: minimal
|
||||
override: true
|
||||
- uses: dtolnay/rust-toolchain@nightly
|
||||
|
||||
- name: Build Docs
|
||||
uses: actions-rs/cargo@v1
|
||||
with:
|
||||
command: doc
|
||||
args: --workspace --all-features --no-deps
|
||||
run: cargo +nightly doc --no-deps --workspace --all-features
|
||||
env:
|
||||
RUSTDOCFLAGS: --cfg=docsrs
|
||||
|
||||
- name: Tweak HTML
|
||||
run: echo '<meta http-equiv="refresh" content="0;url=actix_web/index.html">' > target/doc/index.html
|
||||
|
||||
- name: Deploy to GitHub Pages
|
||||
uses: JamesIves/github-pages-deploy-action@3.7.1
|
||||
uses: JamesIves/github-pages-deploy-action@v4.4.1
|
||||
with:
|
||||
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
|
||||
BRANCH: gh-pages
|
||||
FOLDER: target/doc
|
||||
folder: target/doc
|
||||
single-commit: true
|
||||
|
1
.prettierrc.yaml
Normal file
@ -0,0 +1 @@
|
||||
proseWrap: never
|
1021
CHANGES.md
File diff suppressed because it is too large
151
Cargo.toml
@ -1,131 +1,19 @@
|
||||
[package]
|
||||
name = "actix-web"
|
||||
version = "4.0.0-beta.21"
|
||||
authors = [
|
||||
"Nikolay Kim <fafhrd91@gmail.com>",
|
||||
"Rob Ede <robjtede@icloud.com>",
|
||||
]
|
||||
description = "Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust"
|
||||
keywords = ["actix", "http", "web", "framework", "async"]
|
||||
categories = [
|
||||
"network-programming",
|
||||
"asynchronous",
|
||||
"web-programming::http-server",
|
||||
"web-programming::websocket"
|
||||
]
|
||||
homepage = "https://actix.rs"
|
||||
repository = "https://github.com/actix/actix-web.git"
|
||||
license = "MIT OR Apache-2.0"
|
||||
edition = "2018"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
# features that docs.rs will build with
|
||||
features = ["openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd", "cookies", "secure-cookies"]
|
||||
rustdoc-args = ["--cfg", "docsrs"]
|
||||
|
||||
[lib]
|
||||
name = "actix_web"
|
||||
path = "src/lib.rs"
|
||||
|
||||
[workspace]
|
||||
resolver = "2"
|
||||
members = [
|
||||
".",
|
||||
"actix-files",
|
||||
"actix-http-test",
|
||||
"actix-http",
|
||||
"actix-multipart",
|
||||
"actix-multipart-derive",
|
||||
"actix-router",
|
||||
"actix-test",
|
||||
"actix-web-actors",
|
||||
"actix-web-codegen",
|
||||
"actix-web",
|
||||
"awc",
|
||||
]
|
||||
|
||||
[features]
|
||||
default = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"]
|
||||
|
||||
# Brotli algorithm content-encoding support
|
||||
compress-brotli = ["actix-http/compress-brotli", "__compress"]
|
||||
# Gzip and deflate algorithms content-encoding support
|
||||
compress-gzip = ["actix-http/compress-gzip", "__compress"]
|
||||
# Zstd algorithm content-encoding support
|
||||
compress-zstd = ["actix-http/compress-zstd", "__compress"]
|
||||
|
||||
# support for cookies
|
||||
cookies = ["cookie"]
|
||||
|
||||
# secure cookies feature
|
||||
secure-cookies = ["cookie/secure"]
|
||||
|
||||
# openssl
|
||||
openssl = ["actix-http/openssl", "actix-tls/accept", "actix-tls/openssl"]
|
||||
|
||||
# rustls
|
||||
rustls = ["actix-http/rustls", "actix-tls/accept", "actix-tls/rustls"]
|
||||
|
||||
# Internal (PRIVATE!) features used to aid testing and checking feature status.
|
||||
# Don't rely on these whatsoever. They may disappear at anytime.
|
||||
__compress = []
|
||||
|
||||
# io-uring feature only available for Linux OSes.
|
||||
experimental-io-uring = ["actix-server/io-uring"]
|
||||
|
||||
[dependencies]
|
||||
actix-codec = "0.4.1"
|
||||
actix-macros = "0.2.3"
|
||||
actix-rt = "2.6"
|
||||
actix-server = "2"
|
||||
actix-service = "2.0.0"
|
||||
actix-utils = "3.0.0"
|
||||
actix-tls = { version = "3.0.0", default-features = false, optional = true }
|
||||
|
||||
actix-http = "3.0.0-beta.19"
|
||||
actix-router = "0.5.0-rc.2"
|
||||
actix-web-codegen = "0.5.0-rc.1"
|
||||
|
||||
ahash = "0.7"
|
||||
bytes = "1"
|
||||
cfg-if = "1"
|
||||
cookie = { version = "0.16", features = ["percent-encode"], optional = true }
|
||||
derive_more = "0.99.5"
|
||||
encoding_rs = "0.8"
|
||||
futures-core = { version = "0.3.7", default-features = false }
|
||||
futures-util = { version = "0.3.7", default-features = false }
|
||||
itoa = "1"
|
||||
language-tags = "0.3"
|
||||
once_cell = "1.5"
|
||||
log = "0.4"
|
||||
mime = "0.3"
|
||||
pin-project-lite = "0.2.7"
|
||||
regex = "1.4"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
serde_urlencoded = "0.7"
|
||||
smallvec = "1.6.1"
|
||||
socket2 = "0.4.0"
|
||||
time = { version = "0.3", default-features = false, features = ["formatting"] }
|
||||
url = "2.1"
|
||||
|
||||
[dev-dependencies]
|
||||
actix-files = "0.6.0-beta.15"
|
||||
actix-test = { version = "0.1.0-beta.11", features = ["openssl", "rustls"] }
|
||||
awc = { version = "3.0.0-beta.19", features = ["openssl"] }
|
||||
|
||||
brotli = "3.3.3"
|
||||
const-str = "0.3"
|
||||
criterion = { version = "0.3", features = ["html_reports"] }
|
||||
env_logger = "0.9"
|
||||
flate2 = "1.0.13"
|
||||
futures-util = { version = "0.3.7", default-features = false, features = ["std"] }
|
||||
rand = "0.8"
|
||||
rcgen = "0.8"
|
||||
rustls-pemfile = "0.2"
|
||||
static_assertions = "1"
|
||||
tls-openssl = { package = "openssl", version = "0.10.9" }
|
||||
tls-rustls = { package = "rustls", version = "0.20.0" }
|
||||
zstd = "0.9"
|
||||
|
||||
[profile.dev]
|
||||
# Disabling debug info speeds up builds a bunch and we don't rely on it for debugging that much.
|
||||
debug = 0
|
||||
@ -140,9 +28,10 @@ actix-files = { path = "actix-files" }
|
||||
actix-http = { path = "actix-http" }
|
||||
actix-http-test = { path = "actix-http-test" }
|
||||
actix-multipart = { path = "actix-multipart" }
|
||||
actix-multipart-derive = { path = "actix-multipart-derive" }
|
||||
actix-router = { path = "actix-router" }
|
||||
actix-test = { path = "actix-test" }
|
||||
actix-web = { path = "." }
|
||||
actix-web = { path = "actix-web" }
|
||||
actix-web-actors = { path = "actix-web-actors" }
|
||||
actix-web-codegen = { path = "actix-web-codegen" }
|
||||
awc = { path = "awc" }
|
||||
@ -155,35 +44,3 @@ awc = { path = "awc" }
|
||||
# actix-utils = { path = "../actix-net/actix-utils" }
|
||||
# actix-tls = { path = "../actix-net/actix-tls" }
|
||||
# actix-server = { path = "../actix-net/actix-server" }
|
||||
|
||||
[[test]]
|
||||
name = "test_server"
|
||||
required-features = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"]
|
||||
|
||||
[[test]]
|
||||
name = "compression"
|
||||
required-features = ["compress-brotli", "compress-gzip", "compress-zstd"]
|
||||
|
||||
[[example]]
|
||||
name = "basic"
|
||||
required-features = ["compress-gzip"]
|
||||
|
||||
[[example]]
|
||||
name = "uds"
|
||||
required-features = ["compress-gzip"]
|
||||
|
||||
[[example]]
|
||||
name = "on-connect"
|
||||
required-features = []
|
||||
|
||||
[[bench]]
|
||||
name = "server"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "service"
|
||||
harness = false
|
||||
|
||||
[[bench]]
|
||||
name = "responder"
|
||||
harness = false
|
||||
|
677
MIGRATION.md
@ -1,677 +0,0 @@
|
||||
## Unreleased
|
||||
|
||||
- The default `NormalizePath` behavior now strips trailing slashes by default. This was
|
||||
previously documented to be the case in v3 but the behavior now matches. The effect is that
|
||||
routes defined with trailing slashes will become inaccessible when
|
||||
using `NormalizePath::default()`. As such, calling `NormalizePath::default()` will log a warning.
|
||||
It is advised that the `new` method be used instead.
|
||||
|
||||
Before: `#[get("/test/")]`
|
||||
After: `#[get("/test")]`
|
||||
|
||||
Alternatively, explicitly require trailing slashes: `NormalizePath::new(TrailingSlash::Always)`.
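  A minimal sketch of registering the middleware explicitly (the route and handler names are illustrative, assuming actix-web 4's `middleware::{NormalizePath, TrailingSlash}`):

  ```rust
  use actix_web::middleware::{NormalizePath, TrailingSlash};
  use actix_web::{web, App, HttpResponse, Responder};

  async fn ping() -> impl Responder {
      HttpResponse::Ok().body("pong")
  }

  // Trailing slashes are trimmed, so a request to `/ping/` reaches the `/ping` route.
  let app = App::new()
      .wrap(NormalizePath::new(TrailingSlash::Trim))
      .route("/ping", web::get().to(ping));
  ```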
|
||||
|
||||
- The `type Config` of `FromRequest` was removed.
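  As a sketch, a v4 `FromRequest` implementation now declares only `Error` and `Future` (the `ClientIp` extractor below is purely illustrative):

  ```rust
  use std::future::{ready, Ready};

  use actix_web::{dev::Payload, Error, FromRequest, HttpRequest};

  struct ClientIp(String);

  impl FromRequest for ClientIp {
      // No `type Config` anymore; only `Error` and `Future` remain.
      type Error = Error;
      type Future = Ready<Result<Self, Self::Error>>;

      fn from_request(req: &HttpRequest, _payload: &mut Payload) -> Self::Future {
          let ip = req.peer_addr().map(|a| a.ip().to_string()).unwrap_or_default();
          ready(Ok(ClientIp(ip)))
      }
  }
  ```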
|
||||
|
||||
- Feature flag `compress` has been split into per-algorithm feature flags (brotli, gzip, zstd).
By default all compression algorithms are enabled.
To select the algorithms you want to include with `middleware::Compress`, use the following flags:
|
||||
- `compress-brotli`
|
||||
- `compress-gzip`
|
||||
- `compress-zstd`
|
||||
If you had specified dedicated `actix-web` features in your `Cargo.toml` and you still want compression enabled, change the feature selection as below:
|
||||
|
||||
Before: `"compress"`
|
||||
After: `"compress-brotli", "compress-gzip", "compress-zstd"`
|
||||
|
||||
|
||||
## 3.0.0
|
||||
|
||||
- The return type for `ServiceRequest::app_data::<T>()` was changed from returning a `Data<T>` to
|
||||
simply a `T`. To access a `Data<T>` use `ServiceRequest::app_data::<Data<T>>()`.
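  A small sketch of the v3 access pattern (the `AppState` type is hypothetical):

  ```rust
  use actix_web::dev::ServiceRequest;
  use actix_web::web::Data;

  struct AppState { name: String }

  fn log_state(req: &ServiceRequest) {
      // `app_data::<T>()` now returns the bare `T`; ask for `Data<T>` explicitly
      // to get the shared, reference-counted handle registered on the App.
      if let Some(state) = req.app_data::<Data<AppState>>() {
          println!("app name: {}", state.name);
      }
  }
  ```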
|
||||
|
||||
- Cookie handling has been offloaded to the `cookie` crate:
|
||||
* `USERINFO_ENCODE_SET` is no longer exposed. Percent-encoding is still supported; check docs.
|
||||
* Some types now require lifetime parameters.
|
||||
|
||||
- The time crate was updated to `v0.2`, a major breaking change to the time crate, which affects
|
||||
any `actix-web` method previously expecting a time v0.1 input.
|
||||
|
||||
- Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now
|
||||
result in `SameSite=None` being sent with the response Set-Cookie header.
|
||||
To create a cookie without a SameSite attribute, remove any calls setting same_site.
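  For example (a sketch assuming the default `cookies` feature is enabled):

  ```rust
  use actix_web::cookie::{Cookie, SameSite};

  // Explicit opt-in: this cookie is sent with `SameSite=None` in the Set-Cookie header.
  let explicit_none = Cookie::build("session", "abc123")
      .same_site(SameSite::None)
      .finish();

  // No `same_site` call at all: the Set-Cookie header carries no SameSite attribute.
  let unset = Cookie::build("session", "abc123").finish();
  ```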
|
||||
|
||||
- Support for Actors messages was moved to the `actix-http` crate and is enabled with the `actors` feature.
|
||||
|
||||
- The `content_length` function was removed from actix-http. You can set Content-Length by setting the response body as usual, or by calling the `no_chunking` function.
|
||||
|
||||
- `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
|
||||
`u64` instead of a `usize`.
|
||||
|
||||
- Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`s elements now needs to use
|
||||
destructuring or `.into_inner()`. For example:
|
||||
|
||||
```rust
|
||||
// Previously:
|
||||
async fn some_route(path: web::Path<(String, String)>) -> String {
|
||||
format!("Hello, {} {}", path.0, path.1)
|
||||
}
|
||||
|
||||
// Now (this also worked before):
|
||||
async fn some_route(path: web::Path<(String, String)>) -> String {
|
||||
let (first_name, last_name) = path.into_inner();
|
||||
format!("Hello, {} {}", first_name, last_name)
|
||||
}
|
||||
// Or (this wasn't previously supported):
|
||||
async fn some_route(web::Path((first_name, last_name)): web::Path<(String, String)>) -> String {
|
||||
format!("Hello, {} {}", first_name, last_name)
|
||||
}
|
||||
```
|
||||
|
||||
- `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one.
|
||||
It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`,
|
||||
or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::new(TrailingSlash::MergeOnly))`.
|
||||
|
||||
- `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`.
|
||||
|
||||
- `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
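  A brief sketch of the renamed builder methods (the values shown are arbitrary):

  ```rust
  use actix_web::{App, HttpServer};

  let server = HttpServer::new(|| App::new())
      // Formerly `maxconn` and `maxconnrate`.
      .max_connections(25_000)
      .max_connection_rate(256);
  ```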
|
||||
|
||||
|
||||
## 2.0.0
|
||||
|
||||
- `HttpServer::start()` renamed to `HttpServer::run()`. It is also possible to `.await` the result of `run()`, in which case it resolves when the server exits.
|
||||
|
||||
- `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`.
|
||||
Stored data is available via `HttpRequest::app_data()` method at runtime.
|
||||
|
||||
- Extractor configuration must be registered with `App::app_data()` instead of `App::data()`
|
||||
|
||||
- Sync handlers have been removed and the `.to_async()` method has been renamed to `.to()`. Replace `fn` with `async fn` to convert a sync handler to an async one.
|
||||
|
||||
- `actix_http_test::TestServer` moved to the `actix_web::test` module. To start a test server, use the `test::start()` or `test_start_with_config()` methods.
|
||||
|
||||
- `ResponseError` trait has been refactored. `ResponseError::error_response()` renders the HTTP response.
|
||||
|
||||
- Feature `rust-tls` renamed to `rustls`
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
actix-web = { version = "2.0.0", features = ["rust-tls"] }
|
||||
```
|
||||
|
||||
use
|
||||
|
||||
```rust
|
||||
actix-web = { version = "2.0.0", features = ["rustls"] }
|
||||
```
|
||||
|
||||
- Feature `ssl` renamed to `openssl`
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
actix-web = { version = "2.0.0", features = ["ssl"] }
|
||||
```
|
||||
|
||||
use
|
||||
|
||||
```rust
|
||||
actix-web = { version = "2.0.0", features = ["openssl"] }
|
||||
```
|
||||
- `Cors` builder now requires that you call `.finish()` to construct the middleware
|
||||
|
||||
## 1.0.1
|
||||
|
||||
- Cors middleware has been moved to `actix-cors` crate
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
use actix_web::middleware::cors::Cors;
|
||||
```
|
||||
|
||||
use
|
||||
|
||||
```rust
|
||||
use actix_cors::Cors;
|
||||
```
|
||||
|
||||
- Identity middleware has been moved to `actix-identity` crate
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
use actix_web::middleware::identity::{Identity, CookieIdentityPolicy, IdentityService};
|
||||
```
|
||||
|
||||
use
|
||||
|
||||
```rust
|
||||
use actix_identity::{Identity, CookieIdentityPolicy, IdentityService};
|
||||
```
|
||||
|
||||
|
||||
## 1.0.0
|
||||
|
||||
- Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
|
||||
#[derive(Default)]
|
||||
struct ExtractorConfig {
|
||||
config: String,
|
||||
}
|
||||
|
||||
impl FromRequest for YourExtractor {
|
||||
type Config = ExtractorConfig;
|
||||
type Result = Result<YourExtractor, Error>;
|
||||
|
||||
fn from_request(req: &HttpRequest, cfg: &Self::Config) -> Self::Result {
|
||||
println!("use the config: {:?}", cfg.config);
|
||||
...
|
||||
}
|
||||
}
|
||||
|
||||
App::new().resource("/route_with_config", |r| {
|
||||
r.post().with_config(handler_fn, |cfg| {
|
||||
cfg.0.config = "test".to_string();
|
||||
})
|
||||
})
|
||||
|
||||
```
|
||||
|
||||
Use the `HttpRequest` to get the configuration like any other `Data` with `req.app_data::<C>()`, and set it with the `data()` method on the `resource`:
|
||||
|
||||
```rust
|
||||
#[derive(Default)]
|
||||
struct ExtractorConfig {
|
||||
config: String,
|
||||
}
|
||||
|
||||
impl FromRequest for YourExtractor {
|
||||
type Error = Error;
|
||||
type Future = Result<Self, Self::Error>;
|
||||
type Config = ExtractorConfig;
|
||||
|
||||
fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
|
||||
let cfg = req.app_data::<ExtractorConfig>();
|
||||
println!("config data?: {:?}", cfg.unwrap().role);
|
||||
...
|
||||
}
|
||||
}
|
||||
|
||||
App::new().service(
|
||||
resource("/route_with_config")
|
||||
.data(ExtractorConfig {
|
||||
config: "test".to_string(),
|
||||
})
|
||||
.route(post().to(handler_fn)),
|
||||
)
|
||||
```
|
||||
|
||||
- Resource registration. 1.0 version uses generalized resource
|
||||
registration via `.service()` method.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
App::new().resource("/welcome", |r| r.f(welcome))
|
||||
```
|
||||
|
||||
use App's or Scope's `.service()` method. The `.service()` method accepts an object that implements the `HttpServiceFactory` trait. By default, actix-web provides the `Resource` and `Scope` services.
|
||||
|
||||
```rust
|
||||
App::new().service(
    web::resource("/welcome")
        .route(web::get().to(welcome))
        .route(web::post().to(post_handler))
);
```
|
||||
|
||||
- Scope registration.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
let app = App::new().scope("/{project_id}", |scope| {
|
||||
scope
|
||||
.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
|
||||
.resource("/path2", |r| r.f(|_| HttpResponse::Ok()))
|
||||
.resource("/path3", |r| r.f(|_| HttpResponse::MethodNotAllowed()))
|
||||
});
|
||||
```
|
||||
|
||||
use `.service()` for registration and `web::scope()` as scope object factory.
|
||||
|
||||
```rust
|
||||
let app = App::new().service(
|
||||
web::scope("/{project_id}")
|
||||
.service(web::resource("/path1").to(|| HttpResponse::Ok()))
|
||||
.service(web::resource("/path2").to(|| HttpResponse::Ok()))
|
||||
.service(web::resource("/path3").to(|| HttpResponse::MethodNotAllowed()))
|
||||
);
|
||||
```
|
||||
|
||||
- `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
App::new().resource("/welcome", |r| r.with(welcome))
|
||||
```
|
||||
|
||||
use `.to()` or `.to_async()` methods
|
||||
|
||||
```rust
|
||||
App::new().service(web::resource("/welcome").to(welcome))
|
||||
```
|
||||
|
||||
- Passing arguments to handler with extractors, multiple arguments are allowed
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
fn welcome((body, req): (Bytes, HttpRequest)) -> ... {
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
use multiple arguments
|
||||
|
||||
```rust
|
||||
fn welcome(body: Bytes, req: HttpRequest) -> ... {
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
- `.f()`, `.a()` and `.h()` handler registration methods have been removed.
|
||||
Use `.to()` for handlers and `.to_async()` for async handlers. Handler function
|
||||
must use extractors.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
App::new().resource("/welcome", |r| r.f(welcome))
|
||||
```
|
||||
|
||||
use App's `to()` or `to_async()` methods
|
||||
|
||||
```rust
|
||||
App::new().service(web::resource("/welcome").to(welcome))
|
||||
```
|
||||
|
||||
- `HttpRequest` does not provide access to request's payload stream.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
fn index(req: &HttpRequest) -> Box<Future<Item=HttpResponse, Error=Error>> {
|
||||
req
|
||||
.payload()
|
||||
.from_err()
|
||||
.fold((), |_, chunk| {
|
||||
...
|
||||
})
|
||||
.map(|_| HttpResponse::Ok().finish())
|
||||
.responder()
|
||||
}
|
||||
```
|
||||
|
||||
use `Payload` extractor
|
||||
|
||||
```rust
|
||||
fn index(stream: web::Payload) -> impl Future<Item=HttpResponse, Error=Error> {
|
||||
stream
|
||||
.from_err()
|
||||
.fold((), |_, chunk| {
|
||||
...
|
||||
})
|
||||
.map(|_| HttpResponse::Ok().finish())
|
||||
}
|
||||
```
|
||||
|
||||
- `State` is now `Data`. You register Data during the App initialization process
|
||||
and then access it from handlers either using a Data extractor or using
|
||||
HttpRequest's api.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
App::with_state(T)
|
||||
```
|
||||
|
||||
use App's `data` method
|
||||
|
||||
```rust
|
||||
App::new()
    .data(T)
|
||||
```
|
||||
|
||||
and either use the Data extractor within your handler
|
||||
|
||||
```rust
|
||||
use actix_web::web::Data;
|
||||
|
||||
fn endpoint_handler(data: Data<T>) {
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
.. or access your Data element from the HttpRequest
|
||||
|
||||
```rust
|
||||
fn endpoint_handler(req: HttpRequest) {
|
||||
let data: Option<Data<T>> = req.app_data::<T>();
|
||||
}
|
||||
```
|
||||
|
||||
|
||||
- AsyncResponder is removed, use `.to_async()` registration method and `impl Future<>` as result type.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
use actix_web::AsyncResponder;
|
||||
|
||||
fn endpoint_handler(...) -> impl Future<Item=HttpResponse, Error=Error>{
|
||||
...
|
||||
.responder()
|
||||
}
|
||||
```
|
||||
|
||||
.. simply omit AsyncResponder and the corresponding responder() finish method
|
||||
|
||||
|
||||
- Middleware
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
let app = App::new()
|
||||
.middleware(middleware::Logger::default())
|
||||
```
|
||||
|
||||
use `.wrap()` method
|
||||
|
||||
```rust
|
||||
let app = App::new()
|
||||
.wrap(middleware::Logger::default())
|
||||
.route("/index.html", web::get().to(index));
|
||||
```
|
||||
|
||||
- `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
|
||||
method have been removed. Use `Bytes`, `String`, `Form`, `Json`, `Multipart` extractors instead.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
fn index(req: &HttpRequest) -> Responder {
|
||||
req.body()
|
||||
.and_then(|body| {
|
||||
...
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
use
|
||||
|
||||
```rust
|
||||
fn index(body: Bytes) -> Responder {
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
- `actix_web::server` module has been removed. To start http server use `actix_web::HttpServer` type
|
||||
|
||||
- StaticFiles and NamedFile have been moved to a separate crate.
|
||||
|
||||
instead of `use actix_web::fs::StaticFile`
|
||||
|
||||
use `use actix_files::Files`
|
||||
|
||||
instead of `use actix_web::fs::NamedFile`
|
||||
|
||||
use `use actix_files::NamedFile`
|
||||
|
||||
- Multipart has been moved to a separate crate.
|
||||
|
||||
instead of `use actix_web::multipart::Multipart`
|
||||
|
||||
use `use actix_multipart::Multipart`
|
||||
|
||||
- Response compression is not enabled by default.
|
||||
To enable, use `Compress` middleware, `App::new().wrap(Compress::default())`.
|
||||
|
||||
- Session middleware moved to actix-session crate
|
||||
|
||||
- Actors support have been moved to `actix-web-actors` crate
|
||||
|
||||
- Custom Error
|
||||
|
||||
Instead of the `error_response` method alone, `ResponseError` now provides two methods: `error_response`, which creates the error response, and `render_response`, which returns the error response to the caller.
|
||||
|
||||
The simplest migration from 0.7 to 1.0 is to add the method below to your custom implementation of `ResponseError`:
|
||||
|
||||
```rust
|
||||
fn render_response(&self) -> HttpResponse {
|
||||
self.error_response()
|
||||
}
|
||||
```
|
||||
|
||||
## 0.7.15
|
||||
|
||||
- The `' '` character is not percent decoded anymore before matching routes. If you need to use it in
|
||||
your routes, you should use `%20`.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
fn main() {
|
||||
let app = App::new().resource("/my index", |r| {
|
||||
r.method(http::Method::GET)
|
||||
.with(index);
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
use
|
||||
|
||||
```rust
|
||||
fn main() {
|
||||
let app = App::new().resource("/my%20index", |r| {
|
||||
r.method(http::Method::GET)
|
||||
.with(index);
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
- If you used `AsyncResult::async` you need to replace it with `AsyncResult::future`
|
||||
|
||||
|
||||
## 0.7.4
|
||||
|
||||
- `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as tuple
|
||||
even for handler with one parameter.
|
||||
|
||||
|
||||
## 0.7
|
||||
|
||||
- `HttpRequest` does not implement `Stream` anymore. If you need to read request payload
|
||||
use `HttpMessage::payload()` method.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
fn index(req: HttpRequest) -> impl Responder {
|
||||
req
|
||||
.from_err()
|
||||
.fold(...)
|
||||
....
|
||||
}
|
||||
```
|
||||
|
||||
use `.payload()`
|
||||
|
||||
```rust
|
||||
fn index(req: HttpRequest) -> impl Responder {
|
||||
req
|
||||
.payload() // <- get request payload stream
|
||||
.from_err()
|
||||
.fold(...)
|
||||
....
|
||||
}
|
||||
```
|
||||
|
||||
- [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html)
|
||||
trait uses `&HttpRequest` instead of `&mut HttpRequest`.
|
||||
|
||||
- Removed `Route::with2()` and `Route::with3()` use tuple of extractors instead.
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
fn index(query: Query<..>, info: Json<MyStruct>) -> impl Responder {}
|
||||
```
|
||||
|
||||
use tuple of extractors and use `.with()` for registration:
|
||||
|
||||
```rust
|
||||
fn index((query, json): (Query<..>, Json<MyStruct>)) -> impl Responder {}
|
||||
```
|
||||
|
||||
- `Handler::handle()` uses `&self` instead of `&mut self`
|
||||
|
||||
- `Handler::handle()` accepts reference to `HttpRequest<_>` instead of value
|
||||
|
||||
- Removed deprecated `HttpServer::threads()`, use
|
||||
[HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead.
|
||||
|
||||
- Renamed `client::ClientConnectorError::Connector` to
|
||||
`client::ClientConnectorError::Resolver`
|
||||
|
||||
- `Route::with()` does not return `ExtractorConfig`, to configure
|
||||
extractor use `Route::with_config()`
|
||||
|
||||
instead of
|
||||
|
||||
```rust
|
||||
fn main() {
|
||||
let app = App::new().resource("/index.html", |r| {
|
||||
r.method(http::Method::GET)
|
||||
.with(index)
|
||||
.limit(4096); // <- limit size of the payload
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
use
|
||||
|
||||
```rust
|
||||
|
||||
fn main() {
|
||||
let app = App::new().resource("/index.html", |r| {
|
||||
r.method(http::Method::GET)
|
||||
.with_config(index, |cfg| { // <- register handler
|
||||
cfg.limit(4096); // <- limit size of the payload
|
||||
})
|
||||
});
|
||||
}
|
||||
```
|
||||
|
||||
- `Route::with_async()` does not return `ExtractorConfig`, to configure
|
||||
extractor use `Route::with_async_config()`
|
||||
|
||||
|
||||
## 0.6
|
||||
|
||||
- `Path<T>` extractor returns `ErrorNotFound` on failure instead of `ErrorBadRequest`
|
||||
|
||||
- `ws::Message::Close` now includes optional close reason.
|
||||
`ws::CloseCode::Status` and `ws::CloseCode::Empty` have been removed.
|
||||
|
||||
- `HttpServer::threads()` renamed to `HttpServer::workers()`.
|
||||
|
||||
- `HttpServer::start_ssl()` and `HttpServer::start_tls()` deprecated.
|
||||
Use `HttpServer::bind_ssl()` and `HttpServer::bind_tls()` instead.
|
||||
|
||||
- `HttpRequest::extensions()` returns a read-only reference to the request's extensions; `HttpRequest::extensions_mut()` returns a mutable reference.
|
||||
|
||||
- Instead of
|
||||
|
||||
`use actix_web::middleware::{
|
||||
CookieSessionBackend, CookieSessionError, RequestSession,
|
||||
Session, SessionBackend, SessionImpl, SessionStorage};`
|
||||
|
||||
use `actix_web::middleware::session`
|
||||
|
||||
`use actix_web::middleware::session{CookieSessionBackend, CookieSessionError,
|
||||
RequestSession, Session, SessionBackend, SessionImpl, SessionStorage};`
|
||||
|
||||
- `FromRequest::from_request()` accepts mutable reference to a request
|
||||
|
||||
- `FromRequest::Result` has to implement `Into<Reply<Self>>`
|
||||
|
||||
- [`Responder::respond_to()`](
|
||||
https://actix.rs/actix-web/actix_web/trait.Responder.html#tymethod.respond_to)
|
||||
is generic over `S`
|
||||
|
||||
- Use the `Query` extractor instead of `HttpRequest::query()`.
|
||||
|
||||
```rust
|
||||
fn index(q: Query<HashMap<String, String>>) -> Result<..> {
|
||||
...
|
||||
}
|
||||
```
|
||||
|
||||
or
|
||||
|
||||
```rust
|
||||
let q = Query::<HashMap<String, String>>::extract(req);
|
||||
```
|
||||
|
||||
- WebSocket operations are implemented as the `WsWriter` trait; bring it into scope with `use actix_web::ws::WsWriter`.
|
||||
|
||||
|
||||
## 0.5
|
||||
|
||||
- `HttpResponseBuilder::body()`, `.finish()`, `.json()`
|
||||
methods return `HttpResponse` instead of `Result<HttpResponse>`
|
||||
|
||||
- `actix_web::Method`, `actix_web::StatusCode`, `actix_web::Version`
|
||||
moved to `actix_web::http` module
|
||||
|
||||
- `actix_web::header` moved to `actix_web::http::header`
|
||||
|
||||
- `NormalizePath` moved to `actix_web::http` module
|
||||
|
||||
- `HttpServer` moved to `actix_web::server`, added new `actix_web::server::new()` function,
|
||||
shortcut for `actix_web::server::HttpServer::new()`
|
||||
|
||||
- `DefaultHeaders` middleware does not use separate builder, all builder methods moved to type itself
|
||||
|
||||
- `StaticFiles::new()`'s show_index parameter removed, use `show_files_listing()` method instead.
|
||||
|
||||
- `CookieSessionBackendBuilder` removed, all methods moved to `CookieSessionBackend` type
|
||||
|
||||
- `actix_web::httpcodes` module is deprecated, `HttpResponse::Ok()`, `HttpResponse::Found()` and other `HttpResponse::XXX()`
|
||||
functions should be used instead
|
||||
|
||||
- `ClientRequestBuilder::body()` returns `Result<_, actix_web::Error>`
|
||||
instead of `Result<_, http::Error>`
|
||||
|
||||
- `Application` renamed to a `App`
|
||||
|
||||
- `actix_web::Reply`, `actix_web::Resource` moved to `actix_web::dev`
|
109
README.md
@ -1,109 +0,0 @@
|
||||
<div align="center">
|
||||
<h1>Actix Web</h1>
|
||||
<p>
|
||||
<strong>Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
|
||||
</p>
|
||||
<p>
|
||||
|
||||
[](https://crates.io/crates/actix-web)
|
||||
[](https://docs.rs/actix-web/4.0.0-beta.21)
|
||||

|
||||

|
||||
[](https://deps.rs/crate/actix-web/4.0.0-beta.21)
|
||||
<br />
|
||||
[](https://github.com/actix/actix-web/actions/workflows/ci.yml)
|
||||
[](https://codecov.io/gh/actix/actix-web)
|
||||

|
||||
[](https://discord.gg/NWpN5mmg3x)
|
||||
|
||||
</p>
|
||||
</div>
|
||||
|
||||
## Features
|
||||
|
||||
- Supports *HTTP/1.x* and *HTTP/2*
|
||||
- Streaming and pipelining
|
||||
- Keep-alive and slow requests handling
|
||||
- Client/server [WebSockets](https://actix.rs/docs/websockets/) support
|
||||
- Transparent content compression/decompression (br, gzip, deflate, zstd)
|
||||
- Powerful [request routing](https://actix.rs/docs/url-dispatch/)
|
||||
- Multipart streams
|
||||
- Static assets
|
||||
- SSL support using OpenSSL or Rustls
|
||||
- Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
|
||||
- Includes an async [HTTP client](https://docs.rs/awc/)
|
||||
- Runs on stable Rust 1.54+
|
||||
|
||||
## Documentation
|
||||
|
||||
- [Website & User Guide](https://actix.rs)
|
||||
- [Examples Repository](https://github.com/actix/examples)
|
||||
- [API Documentation](https://docs.rs/actix-web)
|
||||
- [API Documentation (master branch)](https://actix.rs/actix-web/actix_web)
|
||||
|
||||
## Example
|
||||
|
||||
Dependencies:
|
||||
|
||||
```toml
|
||||
[dependencies]
|
||||
actix-web = "3"
|
||||
```
|
||||
|
||||
Code:
|
||||
|
||||
```rust
|
||||
use actix_web::{get, web, App, HttpServer, Responder};
|
||||
|
||||
#[get("/{id}/{name}/index.html")]
|
||||
async fn index(web::Path((id, name)): web::Path<(u32, String)>) -> impl Responder {
|
||||
format!("Hello {}! id:{}", name, id)
|
||||
}
|
||||
|
||||
#[actix_web::main]
|
||||
async fn main() -> std::io::Result<()> {
|
||||
HttpServer::new(|| App::new().service(index))
|
||||
.bind("127.0.0.1:8080")?
|
||||
.run()
|
||||
.await
|
||||
}
|
||||
```
|
||||
|
||||
### More examples
|
||||
|
||||
- [Basic Setup](https://github.com/actix/examples/tree/master/basics/basics/)
|
||||
- [Application State](https://github.com/actix/examples/tree/master/basics/state/)
|
||||
- [JSON Handling](https://github.com/actix/examples/tree/master/json/json/)
|
||||
- [Multipart Streams](https://github.com/actix/examples/tree/master/forms/multipart/)
|
||||
- [Diesel Integration](https://github.com/actix/examples/tree/master/database_interactions/diesel/)
|
||||
- [r2d2 Integration](https://github.com/actix/examples/tree/master/database_interactions/r2d2/)
|
||||
- [Simple WebSocket](https://github.com/actix/examples/tree/master/websockets/websocket/)
|
||||
- [Tera Templates](https://github.com/actix/examples/tree/master/template_engines/tera/)
|
||||
- [Askama Templates](https://github.com/actix/examples/tree/master/template_engines/askama/)
|
||||
- [HTTPS using Rustls](https://github.com/actix/examples/tree/master/security/rustls/)
|
||||
- [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/security/openssl/)
|
||||
- [WebSocket Chat](https://github.com/actix/examples/tree/master/websockets/chat/)
|
||||
|
||||
You may consider checking out
|
||||
[this directory](https://github.com/actix/examples/tree/master/) for more examples.
|
||||
|
||||
## Benchmarks
|
||||
|
||||
One of the fastest web frameworks available according to the
|
||||
[TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r20&test=composite).
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under either of
|
||||
|
||||
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
|
||||
[http://www.apache.org/licenses/LICENSE-2.0])
|
||||
- MIT license ([LICENSE-MIT](LICENSE-MIT) or
|
||||
[http://opensource.org/licenses/MIT])
|
||||
|
||||
at your option.
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
Contribution to the actix-web repo is organized under the terms of the Contributor Covenant.
|
||||
The Actix team promises to intervene to uphold that code of conduct.
|
@ -1,39 +1,72 @@
|
||||
# Changes
|
||||
|
||||
## Unreleased - 2021-xx-xx
|
||||
## Unreleased - 2023-xx-xx
|
||||
|
||||
## 0.6.3 - 2023-01-21
|
||||
|
||||
- XHTML files now use `Content-Disposition: inline` instead of `attachment`. [#2903]
|
||||
- Minimum supported Rust version (MSRV) is now 1.59 due to transitive `time` dependency.
|
||||
- Update `tokio-uring` dependency to `0.4`.
|
||||
|
||||
[#2903]: https://github.com/actix/actix-web/pull/2903
|
||||
|
||||
## 0.6.2 - 2022-07-23
|
||||
|
||||
- Allow partial range responses for video content to start streaming sooner. [#2817]
|
||||
- Minimum supported Rust version (MSRV) is now 1.57 due to transitive `time` dependency.
|
||||
|
||||
[#2817]: https://github.com/actix/actix-web/pull/2817
|
||||
|
||||
## 0.6.1 - 2022-06-11
|
||||
|
||||
- Add `NamedFile::{modified, metadata, content_type, content_disposition, encoding}()` getters. [#2021]
|
||||
- Update `tokio-uring` dependency to `0.3`.
|
||||
- Audio files now use `Content-Disposition: inline` instead of `attachment`. [#2645]
|
||||
- Minimum supported Rust version (MSRV) is now 1.56 due to transitive `hashbrown` dependency.
|
||||
|
||||
[#2021]: https://github.com/actix/actix-web/pull/2021
|
||||
[#2645]: https://github.com/actix/actix-web/pull/2645
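A small sketch using two of the new getters (the path is illustrative and error handling is simplified):

```rust
use actix_files::NamedFile;

fn inspect() -> std::io::Result<()> {
    let file = NamedFile::open("./static/index.html")?;
    // Getters added in 0.6.1 expose metadata derived when the file was opened.
    println!("mime: {}", file.content_type());
    println!("len: {} bytes", file.metadata().len());
    Ok(())
}
```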
|
||||
|
||||
## 0.6.0 - 2022-02-25
|
||||
|
||||
- No significant changes since `0.6.0-beta.16`.
|
||||
|
||||
## 0.6.0-beta.16 - 2022-01-31
|
||||
|
||||
- No significant changes since `0.6.0-beta.15`.
|
||||
|
||||
## 0.6.0-beta.15 - 2022-01-21
|
||||
|
||||
- No significant changes since `0.6.0-beta.14`.
|
||||
|
||||
|
||||
## 0.6.0-beta.14 - 2022-01-14
|
||||
|
||||
- The `prefer_utf8` option introduced in `0.4.0` is now true by default. [#2583]
|
||||
|
||||
[#2583]: https://github.com/actix/actix-web/pull/2583
|
||||
|
||||
|
||||
## 0.6.0-beta.13 - 2022-01-04
|
||||
|
||||
- The `Files` service now rejects requests with URL paths that include `%2F` (decoded: `/`). [#2398]
|
||||
- The `Files` service now correctly decodes `%25` in the URL path to `%` for the file path. [#2398]
|
||||
- Minimum supported Rust version (MSRV) is now 1.54.
|
||||
|
||||
[#2398]: https://github.com/actix/actix-web/pull/2398
|
||||
|
||||
|
||||
## 0.6.0-beta.12 - 2021-12-29
|
||||
|
||||
- No significant changes since `0.6.0-beta.11`.
|
||||
|
||||
|
||||
## 0.6.0-beta.11 - 2021-12-27
|
||||
|
||||
- No significant changes since `0.6.0-beta.10`.
|
||||
|
||||
|
||||
## 0.6.0-beta.10 - 2021-12-11
|
||||
|
||||
- No significant changes since `0.6.0-beta.9`.
|
||||
|
||||
|
||||
## 0.6.0-beta.9 - 2021-11-22
|
||||
|
||||
- Add crate feature `experimental-io-uring`, enabling async file I/O to be utilized. This feature is only available on Linux OSes with recent kernel versions. This feature is semver-exempt. [#2408]
|
||||
- Add `NamedFile::open_async`. [#2408]
|
||||
- Fix 304 Not Modified responses to omit the Content-Length header, as per the spec. [#2453]
|
||||
@ -44,24 +77,24 @@
|
||||
[#2408]: https://github.com/actix/actix-web/pull/2408
|
||||
[#2453]: https://github.com/actix/actix-web/pull/2453
|
||||
|
||||
|
||||
## 0.6.0-beta.8 - 2021-10-20
|
||||
|
||||
- Minimum supported Rust version (MSRV) is now 1.52.
|
||||
|
||||
|
||||
## 0.6.0-beta.7 - 2021-09-09
|
||||
|
||||
- Minimum supported Rust version (MSRV) is now 1.51.
|
||||
|
||||
|
||||
## 0.6.0-beta.6 - 2021-06-26
|
||||
|
||||
- Added `Files::path_filter()`. [#2274]
|
||||
- `Files::show_files_listing()` can now be used with `Files::index_file()` to show files listing as a fallback when the index file is not found. [#2228]
|
||||
|
||||
[#2274]: https://github.com/actix/actix-web/pull/2274
|
||||
[#2228]: https://github.com/actix/actix-web/pull/2228
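
A sketch combining both entries above; the mount path, directory, and extension filter are made up for illustration:

```rust
use actix_files::Files;
use actix_web::App;

fn main() {
    let _app = App::new().service(
        Files::new("/static", "./static")
            // serve index.html when it exists...
            .index_file("index.html")
            // ...and fall back to a generated directory listing when it does not
            .show_files_listing()
            // only expose Markdown files through this service
            .path_filter(|path, _| path.extension().map_or(false, |ext| ext == "md")),
    );
}
```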
|
||||
|
||||
|
||||
## 0.6.0-beta.5 - 2021-06-17
|
||||
|
||||
- `NamedFile` now implements `ServiceFactory` and `HttpServiceFactory` making it much more useful in routing. For example, it can be used directly as a default service. [#2135]
|
||||
- For symbolic links, `Content-Disposition` header no longer shows the filename of the original file. [#2156]
|
||||
- `Files::redirect_to_slash_directory()` now works as expected when used with `Files::show_files_listing()`. [#2225]
|
||||
@ -72,58 +105,58 @@
|
||||
[#2225]: https://github.com/actix/actix-web/pull/2225
|
||||
[#2257]: https://github.com/actix/actix-web/pull/2257
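
A sketch of the routing use case mentioned above, registering a `NamedFile` directly as the app's fallback service; the 404 page path is hypothetical:

```rust
use actix_files::{Files, NamedFile};
use actix_web::App;

async fn build_app() -> std::io::Result<()> {
    // NamedFile now implements ServiceFactory/HttpServiceFactory, so it can be
    // used as-is for requests that match no other route.
    let _app = App::new()
        .service(Files::new("/static", "./static"))
        .default_service(NamedFile::open_async("./static/404.html").await?);

    Ok(())
}
```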
|
||||
|
||||
|
||||
## 0.6.0-beta.4 - 2021-04-02
|
||||
|
||||
- Add support for `.guard` in `Files` to selectively filter `Files` services. [#2046]
|
||||
|
||||
[#2046]: https://github.com/actix/actix-web/pull/2046
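
A sketch of attaching a guard to a `Files` service; the host name is a placeholder:

```rust
use actix_files::Files;
use actix_web::{guard, App};

fn main() {
    // only serve this directory when the request's Host header matches
    let _app = App::new().service(
        Files::new("/assets", "./assets").guard(guard::Host("static.example.com")),
    );
}
```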
|
||||
|
||||
|
||||
## 0.6.0-beta.3 - 2021-03-09
|
||||
|
||||
- No notable changes.
|
||||
|
||||
|
||||
## 0.6.0-beta.2 - 2021-02-10
|
||||
|
||||
- Fix If-Modified-Since and If-Unmodified-Since to not compare using sub-second timestamps. [#1887]
|
||||
- Replace `v_htmlescape` with `askama_escape`. [#1953]
|
||||
|
||||
[#1887]: https://github.com/actix/actix-web/pull/1887
|
||||
[#1953]: https://github.com/actix/actix-web/pull/1953
|
||||
|
||||
|
||||
## 0.6.0-beta.1 - 2021-01-07
|
||||
|
||||
- `HttpRange::parse` now has its own error type.
|
||||
- Update `bytes` to `1.0`. [#1813]
|
||||
|
||||
[#1813]: https://github.com/actix/actix-web/pull/1813
|
||||
|
||||
|
||||
## 0.5.0 - 2020-12-26
|
||||
|
||||
- Optionally support hidden files/directories. [#1811]
|
||||
|
||||
[#1811]: https://github.com/actix/actix-web/pull/1811
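
A sketch of the opt-in, assuming the builder method introduced by #1811 is `use_hidden_files()`; the mount point and directory are placeholders:

```rust
use actix_files::Files;
use actix_web::App;

fn main() {
    // opt in to serving dot-files (e.g. `/.well-known/...`), which are skipped by default
    let _app = App::new().service(Files::new("/", "./public").use_hidden_files());
}
```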
|
||||
|
||||
|
||||
## 0.4.1 - 2020-11-24
|
||||
|
||||
- Clarify order of parameters in `Files::new` and improve docs.
|
||||
|
||||
|
||||
## 0.4.0 - 2020-10-06
|
||||
|
||||
- Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714]
|
||||
|
||||
[#1714]: https://github.com/actix/actix-web/pull/1714
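
A sketch of enabling the option; paths are placeholders:

```rust
use actix_files::Files;
use actix_web::App;

fn main() {
    // append `charset=utf-8` to text-based Content-Type headers
    let _app = App::new().service(Files::new("/static", "./static").prefer_utf8(true));
}
```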
|
||||
|
||||
|
||||
## 0.3.0 - 2020-09-11
|
||||
|
||||
- No significant changes from 0.3.0-beta.1.
|
||||
|
||||
|
||||
## 0.3.0-beta.1 - 2020-07-15
|
||||
|
||||
- Update `v_htmlescape` to 0.10
|
||||
- Update `actix-web` and `actix-http` dependencies to beta.1
|
||||
|
||||
|
||||
## 0.3.0-alpha.1 - 2020-05-23
|
||||
|
||||
- Update `actix-web` and `actix-http` dependencies to alpha
|
||||
- Fix some typos in the docs
|
||||
- Bump minimum supported Rust version to 1.40
|
||||
@ -131,73 +164,73 @@
|
||||
|
||||
[#1384]: https://github.com/actix/actix-web/pull/1384
|
||||
|
||||
|
||||
## 0.2.1 - 2019-12-22
|
||||
|
||||
- Use the same format for file URLs regardless of platforms
|
||||
|
||||
|
||||
## 0.2.0 - 2019-12-20
|
||||
|
||||
- Fix BodyEncoding trait import #1220
|
||||
|
||||
|
||||
## 0.2.0-alpha.1 - 2019-12-07
|
||||
|
||||
- Migrate to `std::future`
|
||||
|
||||
|
||||
## 0.1.7 - 2019-11-06
|
||||
- Add an additional `filename*` param in the `Content-Disposition` header of
|
||||
`actix_files::NamedFile` to be more compatible. (#1151)
|
||||
|
||||
- Add an additional `filename*` param in the `Content-Disposition` header of `actix_files::NamedFile` to be more compatible. (#1151)
|
||||
|
||||
## 0.1.6 - 2019-10-14
|
||||
|
||||
- Add option to redirect to a slash-ended path `Files` #1132
|
||||
|
||||
|
||||
## 0.1.5 - 2019-10-08
|
||||
|
||||
- Bump up `mime_guess` crate version to 2.0.1
|
||||
- Bump up `percent-encoding` crate version to 2.1
|
||||
- Allow user defined request guards for `Files` #1113
|
||||
|
||||
|
||||
## 0.1.4 - 2019-07-20
|
||||
|
||||
- Allow to disable `Content-Disposition` header #686
|
||||
|
||||
|
||||
## 0.1.3 - 2019-06-28
|
||||
|
||||
- Do not set `Content-Length` header, let actix-http set it #930
|
||||
|
||||
|
||||
## 0.1.2 - 2019-06-13
|
||||
|
||||
- Content-Length is 0 for NamedFile HEAD request #914
|
||||
- Fix ring dependency from actix-web default features for #741
|
||||
|
||||
|
||||
## 0.1.1 - 2019-06-01
|
||||
|
||||
- Static files are incorrectly served as both chunked and with length #812
|
||||
|
||||
|
||||
## 0.1.0 - 2019-05-25
|
||||
|
||||
- NamedFile last-modified check always fails due to nano-seconds in file modified date #820
|
||||
|
||||
|
||||
## 0.1.0-beta.4 - 2019-05-12
|
||||
|
||||
- Update actix-web to beta.4
|
||||
|
||||
|
||||
## 0.1.0-beta.1 - 2019-04-20
|
||||
|
||||
- Update actix-web to beta.1
|
||||
|
||||
|
||||
## 0.1.0-alpha.6 - 2019-04-14
|
||||
|
||||
- Update actix-web to alpha6
|
||||
|
||||
|
||||
## 0.1.0-alpha.4 - 2019-04-08
|
||||
|
||||
- Update actix-web to alpha4
|
||||
|
||||
|
||||
## 0.1.0-alpha.2 - 2019-04-02
|
||||
|
||||
- Add default handler support
|
||||
|
||||
|
||||
## 0.1.0-alpha.1 - 2019-03-28
|
||||
|
||||
- Initial impl
|
||||
|
@ -1,9 +1,8 @@
|
||||
[package]
|
||||
name = "actix-files"
|
||||
version = "0.6.0-beta.15"
|
||||
version = "0.6.3"
|
||||
authors = [
|
||||
"Nikolay Kim <fafhrd91@gmail.com>",
|
||||
"fakeshadow <24548779@qq.com>",
|
||||
"Rob Ede <robjtede@icloud.com>",
|
||||
]
|
||||
description = "Static file serving for Actix Web"
|
||||
@ -22,27 +21,30 @@ path = "src/lib.rs"
|
||||
experimental-io-uring = ["actix-web/experimental-io-uring", "tokio-uring"]
|
||||
|
||||
[dependencies]
|
||||
actix-http = "3.0.0-beta.19"
|
||||
actix-http = "3"
|
||||
actix-service = "2"
|
||||
actix-utils = "3"
|
||||
actix-web = { version = "4.0.0-beta.21", default-features = false }
|
||||
actix-web = { version = "4", default-features = false }
|
||||
|
||||
askama_escape = "0.10"
|
||||
bitflags = "1"
|
||||
bytes = "1"
|
||||
derive_more = "0.99.5"
|
||||
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
|
||||
futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] }
|
||||
http-range = "0.1.4"
|
||||
log = "0.4"
|
||||
mime = "0.3"
|
||||
mime_guess = "2.0.1"
|
||||
percent-encoding = "2.1"
|
||||
pin-project-lite = "0.2.7"
|
||||
v_htmlescape= "0.15"
|
||||
|
||||
tokio-uring = { version = "0.2", optional = true, features = ["bytes"] }
|
||||
# experimental-io-uring
|
||||
[target.'cfg(target_os = "linux")'.dependencies]
|
||||
tokio-uring = { version = "0.4", optional = true, features = ["bytes"] }
|
||||
actix-server = { version = "2.2", optional = true } # ensure matching tokio-uring versions
|
||||
|
||||
[dev-dependencies]
|
||||
actix-rt = "2.2"
|
||||
actix-test = "0.1.0-beta.11"
|
||||
actix-web = "4.0.0-beta.21"
|
||||
actix-rt = "2.7"
|
||||
actix-test = "0.1"
|
||||
actix-web = "4"
|
||||
tempfile = "3.2"
|
||||
|
@ -3,16 +3,16 @@
|
||||
> Static file serving for Actix Web
|
||||
|
||||
[](https://crates.io/crates/actix-files)
|
||||
[](https://docs.rs/actix-files/0.6.0-beta.15)
|
||||
[](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
|
||||
[](https://docs.rs/actix-files/0.6.3)
|
||||

|
||||

|
||||
<br />
|
||||
[](https://deps.rs/crate/actix-files/0.6.0-beta.15)
|
||||
[](https://deps.rs/crate/actix-files/0.6.3)
|
||||
[](https://crates.io/crates/actix-files)
|
||||
[](https://discord.gg/NWpN5mmg3x)
|
||||
|
||||
## Documentation & Resources
|
||||
|
||||
- [API Documentation](https://docs.rs/actix-files/)
|
||||
- [Example Project](https://github.com/actix/examples/tree/master/basics/static_index)
|
||||
- Minimum Supported Rust Version (MSRV): 1.54
|
||||
- [API Documentation](https://docs.rs/actix-files)
|
||||
- [Example Project](https://github.com/actix/examples/tree/master/basics/static-files)
|
||||
- Minimum Supported Rust Version (MSRV): 1.59
|
||||
|
@ -81,7 +81,7 @@ async fn chunked_read_file_callback(
|
||||
) -> Result<(File, Bytes), Error> {
|
||||
use io::{Read as _, Seek as _};
|
||||
|
||||
let res = actix_web::rt::task::spawn_blocking(move || {
|
||||
let res = actix_web::web::block(move || {
|
||||
let mut buf = Vec::with_capacity(max_bytes);
|
||||
|
||||
file.seek(io::SeekFrom::Start(offset))?;
|
||||
@ -94,8 +94,7 @@ async fn chunked_read_file_callback(
|
||||
Ok((file, Bytes::from(buf)))
|
||||
}
|
||||
})
|
||||
.await
|
||||
.map_err(|_| actix_web::error::BlockingError)??;
|
||||
.await??;
|
||||
|
||||
Ok(res)
|
||||
}
|
||||
|
@ -1,8 +1,8 @@
|
||||
use std::{fmt::Write, fs::DirEntry, io, path::Path, path::PathBuf};
|
||||
|
||||
use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse};
|
||||
use askama_escape::{escape as escape_html_entity, Html};
|
||||
use percent_encoding::{utf8_percent_encode, CONTROLS};
|
||||
use v_htmlescape::escape as escape_html_entity;
|
||||
|
||||
/// A directory; responds with the generated directory listing.
|
||||
#[derive(Debug)]
|
||||
@ -59,7 +59,7 @@ macro_rules! encode_file_url {
|
||||
/// ```
|
||||
macro_rules! encode_file_name {
|
||||
($entry:ident) => {
|
||||
escape_html_entity(&$entry.file_name().to_string_lossy(), Html)
|
||||
escape_html_entity(&$entry.file_name().to_string_lossy())
|
||||
};
|
||||
}
|
||||
|
||||
@ -75,7 +75,7 @@ pub(crate) fn directory_listing(
|
||||
if dir.is_visible(&entry) {
|
||||
let entry = entry.unwrap();
|
||||
let p = match entry.path().strip_prefix(&dir.path) {
|
||||
Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace("\\", "/"),
|
||||
Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace('\\', "/"),
|
||||
Ok(p) => base.join(p).to_string_lossy().into_owned(),
|
||||
Err(_) => continue,
|
||||
};
|
||||
|
@ -2,7 +2,7 @@ use actix_web::{http::StatusCode, ResponseError};
|
||||
use derive_more::Display;
|
||||
|
||||
/// Errors which can occur when serving static files.
|
||||
#[derive(Display, Debug, PartialEq)]
|
||||
#[derive(Debug, PartialEq, Eq, Display)]
|
||||
pub enum FilesError {
|
||||
/// Path is not a directory
|
||||
#[allow(dead_code)]
|
||||
@ -22,7 +22,7 @@ impl ResponseError for FilesError {
|
||||
}
|
||||
|
||||
#[allow(clippy::enum_variant_names)]
|
||||
#[derive(Display, Debug, PartialEq)]
|
||||
#[derive(Debug, PartialEq, Eq, Display)]
|
||||
#[non_exhaustive]
|
||||
pub enum UriSegmentError {
|
||||
/// The segment started with the wrapped invalid character.
|
||||
|
@ -37,7 +37,7 @@ use crate::{
|
||||
/// .service(Files::new("/static", "."));
|
||||
/// ```
|
||||
pub struct Files {
|
||||
path: String,
|
||||
mount_path: String,
|
||||
directory: PathBuf,
|
||||
index: Option<String>,
|
||||
show_index: bool,
|
||||
@ -68,7 +68,7 @@ impl Clone for Files {
|
||||
default: self.default.clone(),
|
||||
renderer: self.renderer.clone(),
|
||||
file_flags: self.file_flags,
|
||||
path: self.path.clone(),
|
||||
mount_path: self.mount_path.clone(),
|
||||
mime_override: self.mime_override.clone(),
|
||||
path_filter: self.path_filter.clone(),
|
||||
use_guards: self.use_guards.clone(),
|
||||
@ -107,7 +107,7 @@ impl Files {
|
||||
};
|
||||
|
||||
Files {
|
||||
path: mount_path.trim_end_matches('/').to_owned(),
|
||||
mount_path: mount_path.trim_end_matches('/').to_owned(),
|
||||
directory: dir,
|
||||
index: None,
|
||||
show_index: false,
|
||||
@ -142,7 +142,7 @@ impl Files {
|
||||
self
|
||||
}
|
||||
|
||||
/// Set custom directory renderer
|
||||
/// Set custom directory renderer.
|
||||
pub fn files_listing_renderer<F>(mut self, f: F) -> Self
|
||||
where
|
||||
for<'r, 's> F:
|
||||
@ -152,7 +152,7 @@ impl Files {
|
||||
self
|
||||
}
|
||||
|
||||
/// Specifies mime override callback
|
||||
/// Specifies MIME override callback.
|
||||
pub fn mime_override<F>(mut self, f: F) -> Self
|
||||
where
|
||||
F: Fn(&mime::Name<'_>) -> DispositionType + 'static,
|
||||
@ -342,9 +342,9 @@ impl HttpServiceFactory for Files {
|
||||
}
|
||||
|
||||
let rdef = if config.is_root() {
|
||||
ResourceDef::root_prefix(&self.path)
|
||||
ResourceDef::root_prefix(&self.mount_path)
|
||||
} else {
|
||||
ResourceDef::prefix(&self.path)
|
||||
ResourceDef::prefix(&self.mount_path)
|
||||
};
|
||||
|
||||
config.register_service(rdef, guards, self, None)
|
||||
@ -390,3 +390,42 @@ impl ServiceFactory<ServiceRequest> for Files {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use actix_web::{
|
||||
http::StatusCode,
|
||||
test::{self, TestRequest},
|
||||
App, HttpResponse,
|
||||
};
|
||||
|
||||
use super::*;
|
||||
|
||||
#[actix_web::test]
|
||||
async fn custom_files_listing_renderer() {
|
||||
let srv = test::init_service(
|
||||
App::new().service(
|
||||
Files::new("/", "./tests")
|
||||
.show_files_listing()
|
||||
.files_listing_renderer(|dir, req| {
|
||||
Ok(ServiceResponse::new(
|
||||
req.clone(),
|
||||
HttpResponse::Ok().body(dir.path.to_str().unwrap().to_owned()),
|
||||
))
|
||||
}),
|
||||
),
|
||||
)
|
||||
.await;
|
||||
|
||||
let req = TestRequest::with_uri("/").to_request();
|
||||
let res = test::call_service(&srv, req).await;
|
||||
|
||||
assert_eq!(res.status(), StatusCode::OK);
|
||||
let body = test::read_body(res).await;
|
||||
assert!(
|
||||
body.ends_with(b"actix-files/tests/"),
|
||||
"body {:?} does not end with `actix-files/tests/`",
|
||||
body
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@ -13,6 +13,7 @@
|
||||
|
||||
#![deny(rust_2018_idioms, nonstandard_style)]
|
||||
#![warn(future_incompatible, missing_docs, missing_debug_implementations)]
|
||||
#![allow(clippy::uninlined_format_args)]
|
||||
|
||||
use actix_service::boxed::{BoxService, BoxServiceFactory};
|
||||
use actix_web::{
|
||||
@ -106,7 +107,7 @@ mod tests {
|
||||
let req = TestRequest::default()
|
||||
.insert_header((header::IF_MODIFIED_SINCE, since))
|
||||
.to_http_request();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert_eq!(resp.status(), StatusCode::NOT_MODIFIED);
|
||||
}
|
||||
|
||||
@ -118,7 +119,7 @@ mod tests {
|
||||
let req = TestRequest::default()
|
||||
.insert_header((header::IF_MODIFIED_SINCE, since))
|
||||
.to_http_request();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert_eq!(resp.status(), StatusCode::NOT_MODIFIED);
|
||||
}
|
||||
|
||||
@ -131,7 +132,7 @@ mod tests {
|
||||
.insert_header((header::IF_NONE_MATCH, "miss_etag"))
|
||||
.insert_header((header::IF_MODIFIED_SINCE, since))
|
||||
.to_http_request();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert_ne!(resp.status(), StatusCode::NOT_MODIFIED);
|
||||
}
|
||||
|
||||
@ -143,7 +144,7 @@ mod tests {
|
||||
let req = TestRequest::default()
|
||||
.insert_header((header::IF_UNMODIFIED_SINCE, since))
|
||||
.to_http_request();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert_eq!(resp.status(), StatusCode::OK);
|
||||
}
|
||||
|
||||
@ -155,7 +156,7 @@ mod tests {
|
||||
let req = TestRequest::default()
|
||||
.insert_header((header::IF_UNMODIFIED_SINCE, since))
|
||||
.to_http_request();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert_eq!(resp.status(), StatusCode::PRECONDITION_FAILED);
|
||||
}
|
||||
|
||||
@ -172,7 +173,7 @@ mod tests {
|
||||
}
|
||||
|
||||
let req = TestRequest::default().to_http_request();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert_eq!(
|
||||
resp.headers().get(header::CONTENT_TYPE).unwrap(),
|
||||
"text/x-toml"
|
||||
@ -196,7 +197,7 @@ mod tests {
|
||||
}
|
||||
|
||||
let req = TestRequest::default().to_http_request();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert_eq!(
|
||||
resp.headers().get(header::CONTENT_DISPOSITION).unwrap(),
|
||||
"inline; filename=\"Cargo.toml\""
|
||||
@ -207,7 +208,7 @@ mod tests {
|
||||
.unwrap()
|
||||
.disable_content_disposition();
|
||||
let req = TestRequest::default().to_http_request();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert!(resp.headers().get(header::CONTENT_DISPOSITION).is_none());
|
||||
}
|
||||
|
||||
@ -235,7 +236,7 @@ mod tests {
|
||||
}
|
||||
|
||||
let req = TestRequest::default().to_http_request();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert_eq!(
|
||||
resp.headers().get(header::CONTENT_TYPE).unwrap(),
|
||||
"text/x-toml"
|
||||
@ -261,7 +262,7 @@ mod tests {
|
||||
}
|
||||
|
||||
let req = TestRequest::default().to_http_request();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert_eq!(
|
||||
resp.headers().get(header::CONTENT_TYPE).unwrap(),
|
||||
"text/xml"
|
||||
@ -284,7 +285,7 @@ mod tests {
|
||||
}
|
||||
|
||||
let req = TestRequest::default().to_http_request();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert_eq!(
|
||||
resp.headers().get(header::CONTENT_TYPE).unwrap(),
|
||||
"image/png"
|
||||
@ -300,7 +301,7 @@ mod tests {
|
||||
let file = NamedFile::open_async("tests/test.js").await.unwrap();
|
||||
|
||||
let req = TestRequest::default().to_http_request();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert_eq!(
|
||||
resp.headers().get(header::CONTENT_TYPE).unwrap(),
|
||||
"application/javascript; charset=utf-8"
|
||||
@ -330,7 +331,7 @@ mod tests {
|
||||
}
|
||||
|
||||
let req = TestRequest::default().to_http_request();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert_eq!(
|
||||
resp.headers().get(header::CONTENT_TYPE).unwrap(),
|
||||
"image/png"
|
||||
@ -353,7 +354,7 @@ mod tests {
|
||||
}
|
||||
|
||||
let req = TestRequest::default().to_http_request();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert_eq!(
|
||||
resp.headers().get(header::CONTENT_TYPE).unwrap(),
|
||||
"application/octet-stream"
|
||||
@ -364,22 +365,45 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[allow(deprecated)]
|
||||
#[actix_rt::test]
|
||||
async fn test_named_file_status_code_text() {
|
||||
let mut file = NamedFile::open_async("Cargo.toml")
|
||||
async fn status_code_customize_same_output() {
|
||||
let file1 = NamedFile::open_async("Cargo.toml")
|
||||
.await
|
||||
.unwrap()
|
||||
.set_status_code(StatusCode::NOT_FOUND);
|
||||
|
||||
let file2 = NamedFile::open_async("Cargo.toml")
|
||||
.await
|
||||
.unwrap()
|
||||
.customize()
|
||||
.with_status(StatusCode::NOT_FOUND);
|
||||
|
||||
let req = TestRequest::default().to_http_request();
|
||||
let res1 = file1.respond_to(&req);
|
||||
let res2 = file2.respond_to(&req);
|
||||
|
||||
assert_eq!(res1.status(), StatusCode::NOT_FOUND);
|
||||
assert_eq!(res2.status(), StatusCode::NOT_FOUND);
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_named_file_status_code_text() {
|
||||
let mut file = NamedFile::open_async("Cargo.toml").await.unwrap();
|
||||
|
||||
{
|
||||
file.file();
|
||||
let _f: &File = &file;
|
||||
}
|
||||
|
||||
{
|
||||
let _f: &mut File = &mut file;
|
||||
}
|
||||
|
||||
let file = file.customize().with_status(StatusCode::NOT_FOUND);
|
||||
|
||||
let req = TestRequest::default().to_http_request();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert_eq!(
|
||||
resp.headers().get(header::CONTENT_TYPE).unwrap(),
|
||||
"text/x-toml"
|
||||
@ -633,7 +657,7 @@ mod tests {
|
||||
async fn test_named_file_allowed_method() {
|
||||
let req = TestRequest::default().method(Method::GET).to_http_request();
|
||||
let file = NamedFile::open_async("Cargo.toml").await.unwrap();
|
||||
let resp = file.respond_to(&req).await.unwrap();
|
||||
let resp = file.respond_to(&req);
|
||||
assert_eq!(resp.status(), StatusCode::OK);
|
||||
}
|
||||
|
||||
|
@ -23,6 +23,7 @@ use actix_web::{
|
||||
use bitflags::bitflags;
|
||||
use derive_more::{Deref, DerefMut};
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
use mime::Mime;
|
||||
use mime_guess::from_path;
|
||||
|
||||
use crate::{encoding::equiv_utf8_text, range::HttpRange};
|
||||
@ -76,8 +77,8 @@ pub struct NamedFile {
|
||||
pub(crate) md: Metadata,
|
||||
pub(crate) flags: Flags,
|
||||
pub(crate) status_code: StatusCode,
|
||||
pub(crate) content_type: mime::Mime,
|
||||
pub(crate) content_disposition: header::ContentDisposition,
|
||||
pub(crate) content_type: Mime,
|
||||
pub(crate) content_disposition: ContentDisposition,
|
||||
pub(crate) encoding: Option<ContentEncoding>,
|
||||
}
|
||||
|
||||
@ -96,18 +97,18 @@ impl NamedFile {
|
||||
///
|
||||
/// # Examples
|
||||
/// ```ignore
|
||||
/// use std::{
|
||||
/// io::{self, Write as _},
|
||||
/// env,
|
||||
/// fs::File
|
||||
/// };
|
||||
/// use actix_files::NamedFile;
|
||||
/// use std::io::{self, Write};
|
||||
/// use std::env;
|
||||
/// use std::fs::File;
|
||||
///
|
||||
/// fn main() -> io::Result<()> {
|
||||
/// let mut file = File::create("foo.txt")?;
|
||||
/// file.write_all(b"Hello, world!")?;
|
||||
/// let named_file = NamedFile::from_file(file, "bar.txt")?;
|
||||
/// # std::fs::remove_file("foo.txt");
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// let mut file = File::create("foo.txt")?;
|
||||
/// file.write_all(b"Hello, world!")?;
|
||||
/// let named_file = NamedFile::from_file(file, "bar.txt")?;
|
||||
/// # std::fs::remove_file("foo.txt");
|
||||
/// Ok(())
|
||||
/// ```
|
||||
pub fn from_file<P: AsRef<Path>>(file: File, path: P) -> io::Result<NamedFile> {
|
||||
let path = path.as_ref().to_path_buf();
|
||||
@ -128,10 +129,10 @@ impl NamedFile {
|
||||
let ct = from_path(&path).first_or_octet_stream();
|
||||
|
||||
let disposition = match ct.type_() {
|
||||
mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline,
|
||||
mime::IMAGE | mime::TEXT | mime::AUDIO | mime::VIDEO => DispositionType::Inline,
|
||||
mime::APPLICATION => match ct.subtype() {
|
||||
mime::JAVASCRIPT | mime::JSON => DispositionType::Inline,
|
||||
name if name == "wasm" => DispositionType::Inline,
|
||||
name if name == "wasm" || name == "xhtml" => DispositionType::Inline,
|
||||
_ => DispositionType::Attachment,
|
||||
},
|
||||
_ => DispositionType::Attachment,
|
||||
@ -211,8 +212,8 @@ impl NamedFile {
|
||||
|
||||
/// Attempts to open a file asynchronously in read-only mode.
|
||||
///
|
||||
/// When the `experimental-io-uring` crate feature is enabled, this will be async.
|
||||
/// Otherwise, it will be just like [`open`][Self::open].
|
||||
/// When the `experimental-io-uring` crate feature is enabled, this will be async. Otherwise, it
|
||||
/// will behave just like `open`.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
@ -237,13 +238,13 @@ impl NamedFile {
|
||||
Self::from_file(file, path)
|
||||
}
|
||||
|
||||
/// Returns reference to the underlying `File` object.
|
||||
/// Returns reference to the underlying file object.
|
||||
#[inline]
|
||||
pub fn file(&self) -> &File {
|
||||
&self.file
|
||||
}
|
||||
|
||||
/// Retrieve the path of this file.
|
||||
/// Returns the filesystem path to this file.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
@ -261,16 +262,53 @@ impl NamedFile {
|
||||
self.path.as_path()
|
||||
}
|
||||
|
||||
/// Set response **Status Code**
|
||||
/// Returns the time the file was last modified.
|
||||
///
|
||||
/// Returns `None` only on unsupported platforms; see [`std::fs::Metadata::modified()`].
|
||||
/// Therefore, it is usually safe to unwrap this.
|
||||
#[inline]
|
||||
pub fn modified(&self) -> Option<SystemTime> {
|
||||
self.modified
|
||||
}
|
||||
|
||||
/// Returns the filesystem metadata associated with this file.
|
||||
#[inline]
|
||||
pub fn metadata(&self) -> &Metadata {
|
||||
&self.md
|
||||
}
|
||||
|
||||
/// Returns the `Content-Type` header that will be used when serving this file.
|
||||
#[inline]
|
||||
pub fn content_type(&self) -> &Mime {
|
||||
&self.content_type
|
||||
}
|
||||
|
||||
/// Returns the `Content-Disposition` that will be used when serving this file.
|
||||
#[inline]
|
||||
pub fn content_disposition(&self) -> &ContentDisposition {
|
||||
&self.content_disposition
|
||||
}
|
||||
|
||||
/// Returns the `Content-Encoding` that will be used when serving this file.
|
||||
///
|
||||
/// A return value of `None` indicates that the content is not already using a compressed
|
||||
/// representation and may be subject to compression downstream.
|
||||
#[inline]
|
||||
pub fn content_encoding(&self) -> Option<ContentEncoding> {
|
||||
self.encoding
|
||||
}
|
||||
|
||||
/// Set response status code.
|
||||
#[deprecated(since = "0.7.0", note = "Prefer `Responder::customize()`.")]
|
||||
pub fn set_status_code(mut self, status: StatusCode) -> Self {
|
||||
self.status_code = status;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the MIME Content-Type for serving this file. By default the Content-Type is inferred
|
||||
/// from the filename extension.
|
||||
/// Sets the `Content-Type` header that will be used when serving this file. By default the
|
||||
/// `Content-Type` is inferred from the filename extension.
|
||||
#[inline]
|
||||
pub fn set_content_type(mut self, mime_type: mime::Mime) -> Self {
|
||||
pub fn set_content_type(mut self, mime_type: Mime) -> Self {
|
||||
self.content_type = mime_type;
|
||||
self
|
||||
}
|
||||
@ -283,24 +321,26 @@ impl NamedFile {
|
||||
/// filename is taken from the path provided in the `open` method after converting it to UTF-8
|
||||
/// (using `to_string_lossy`).
|
||||
#[inline]
|
||||
pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self {
|
||||
pub fn set_content_disposition(mut self, cd: ContentDisposition) -> Self {
|
||||
self.content_disposition = cd;
|
||||
self.flags.insert(Flags::CONTENT_DISPOSITION);
|
||||
self
|
||||
}
|
||||
|
||||
/// Disable `Content-Disposition` header.
|
||||
/// Disables `Content-Disposition` header.
|
||||
///
|
||||
/// By default Content-Disposition` header is enabled.
|
||||
/// By default, the `Content-Disposition` header is sent.
|
||||
#[inline]
|
||||
pub fn disable_content_disposition(mut self) -> Self {
|
||||
self.flags.remove(Flags::CONTENT_DISPOSITION);
|
||||
self
|
||||
}
|
||||
|
||||
/// Set content encoding for serving this file
|
||||
/// Sets content encoding for this file.
|
||||
///
|
||||
/// Must be used with [`actix_web::middleware::Compress`] to take effect.
|
||||
/// This prevents the `Compress` middleware from modifying the file contents and signals to
|
||||
/// browsers/clients how to decode it. For example, if serving a compressed HTML file (e.g.,
|
||||
/// `index.html.gz`) then use `.set_content_encoding(ContentEncoding::Gzip)`.
|
||||
#[inline]
|
||||
pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self {
|
||||
self.encoding = Some(enc);
|
||||
@ -488,11 +528,26 @@ impl NamedFile {
|
||||
length = ranges[0].length;
|
||||
offset = ranges[0].start;
|
||||
|
||||
// don't allow compression middleware to modify partial content
|
||||
res.insert_header((
|
||||
header::CONTENT_ENCODING,
|
||||
HeaderValue::from_static("identity"),
|
||||
));
|
||||
// When a Content-Encoding header is present in a 206 partial content response
|
||||
// for video content, it prevents browser video players from starting playback
|
||||
// before loading the whole video and also prevents seeking.
|
||||
//
|
||||
// See: https://github.com/actix/actix-web/issues/2815
|
||||
//
|
||||
// The assumption of this fix is that the video player knows to not send an
|
||||
// Accept-Encoding header for this request and that downstream middleware will
|
||||
// not attempt compression for requests without it.
|
||||
//
|
||||
// TODO: Solve question around what to do if self.encoding is set and partial
|
||||
// range is requested. Reject request? Ignoring self.encoding seems wrong, too.
|
||||
// In practice, it should not come up.
|
||||
if req.headers().contains_key(&header::ACCEPT_ENCODING) {
|
||||
// don't allow compression middleware to modify partial content
|
||||
res.insert_header((
|
||||
header::CONTENT_ENCODING,
|
||||
HeaderValue::from_static("identity"),
|
||||
));
|
||||
}
|
||||
|
||||
res.insert_header((
|
||||
header::CONTENT_RANGE,
|
||||
|
@ -30,7 +30,7 @@ impl PathBufWrap {
|
||||
let mut segment_count = path.matches('/').count() + 1;
|
||||
|
||||
// we can decode the whole path here (instead of per-segment decoding)
|
||||
// because we will reject `%2F` in paths using `segement_count`.
|
||||
// because we will reject `%2F` in paths using `segment_count`.
|
||||
let path = percent_encoding::percent_decode_str(path)
|
||||
.decode_utf8()
|
||||
.map_err(|_| UriSegmentError::NotValidUtf8)?;
|
||||
@ -59,6 +59,8 @@ impl PathBufWrap {
|
||||
continue;
|
||||
} else if cfg!(windows) && segment.contains('\\') {
|
||||
return Err(UriSegmentError::BadChar('\\'));
|
||||
} else if cfg!(windows) && segment.contains(':') {
|
||||
return Err(UriSegmentError::BadChar(':'));
|
||||
} else {
|
||||
buf.push(segment)
|
||||
}
|
||||
@ -66,7 +68,11 @@ impl PathBufWrap {
|
||||
|
||||
// make sure we agree with stdlib parser
|
||||
for (i, component) in buf.components().enumerate() {
|
||||
assert!(matches!(component, Component::Normal(_)));
|
||||
assert!(
|
||||
matches!(component, Component::Normal(_)),
|
||||
"component `{:?}` is not normal",
|
||||
component
|
||||
);
|
||||
assert!(i < segment_count);
|
||||
}
|
||||
|
||||
@ -159,4 +165,26 @@ mod tests {
|
||||
PathBuf::from_iter(vec!["etc/passwd"])
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
#[cfg_attr(windows, should_panic)]
|
||||
fn windows_drive_traversal() {
|
||||
// detect issues in windows that could lead to path traversal
|
||||
// see <https://github.com/SergioBenitez/Rocket/issues/1949>
|
||||
|
||||
assert_eq!(
|
||||
PathBufWrap::parse_path("C:test.txt", false).unwrap().0,
|
||||
PathBuf::from_iter(vec!["C:test.txt"])
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
PathBufWrap::parse_path("C:../whatever", false).unwrap().0,
|
||||
PathBuf::from_iter(vec!["C:../whatever"])
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
PathBufWrap::parse_path(":test.txt", false).unwrap().0,
|
||||
PathBuf::from_iter(vec![":test.txt"])
|
||||
);
|
||||
}
|
||||
}
|
||||
|
@ -23,7 +23,7 @@ impl Deref for FilesService {
|
||||
type Target = FilesServiceInner;
|
||||
|
||||
fn deref(&self) -> &Self::Target {
|
||||
&*self.0
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
@ -168,7 +168,7 @@ impl Service<ServiceRequest> for FilesService {
|
||||
}
|
||||
}
|
||||
None if this.show_index => Ok(this.show_index(req, path)),
|
||||
_ => Ok(ServiceResponse::from_err(
|
||||
None => Ok(ServiceResponse::from_err(
|
||||
FilesError::IsDirectory,
|
||||
req.into_parts().0,
|
||||
)),
|
||||
|
@ -1,11 +1,11 @@
|
||||
use actix_files::Files;
|
||||
use actix_files::{Files, NamedFile};
|
||||
use actix_web::{
|
||||
http::{
|
||||
header::{self, HeaderValue},
|
||||
StatusCode,
|
||||
},
|
||||
test::{self, TestRequest},
|
||||
App,
|
||||
web, App,
|
||||
};
|
||||
|
||||
#[actix_web::test]
|
||||
@ -36,3 +36,31 @@ async fn test_utf8_file_contents() {
|
||||
Some(&HeaderValue::from_static("text/plain")),
|
||||
);
|
||||
}
|
||||
|
||||
#[actix_web::test]
|
||||
async fn partial_range_response_encoding() {
|
||||
let srv = test::init_service(App::new().default_service(web::to(|| async {
|
||||
NamedFile::open_async("./tests/test.binary").await.unwrap()
|
||||
})))
|
||||
.await;
|
||||
|
||||
// range request without accept-encoding returns no content-encoding header
|
||||
let req = TestRequest::with_uri("/")
|
||||
.append_header((header::RANGE, "bytes=10-20"))
|
||||
.to_request();
|
||||
let res = test::call_service(&srv, req).await;
|
||||
assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT);
|
||||
assert!(!res.headers().contains_key(header::CONTENT_ENCODING));
|
||||
|
||||
// range request with accept-encoding returns a content-encoding header
|
||||
let req = TestRequest::with_uri("/")
|
||||
.append_header((header::RANGE, "bytes=10-20"))
|
||||
.append_header((header::ACCEPT_ENCODING, "identity"))
|
||||
.to_request();
|
||||
let res = test::call_service(&srv, req).await;
|
||||
assert_eq!(res.status(), StatusCode::PARTIAL_CONTENT);
|
||||
assert_eq!(
|
||||
res.headers().get(header::CONTENT_ENCODING).unwrap(),
|
||||
"identity"
|
||||
);
|
||||
}
|
||||
|
@ -1,67 +1,97 @@
|
||||
# Changes
|
||||
|
||||
## Unreleased - 2021-xx-xx
|
||||
## Unreleased - 2023-xx-xx
|
||||
|
||||
## 3.1.0 - 2023-01-21
|
||||
|
||||
- Minimum supported Rust version (MSRV) is now 1.59.
|
||||
|
||||
## 3.0.0 - 2022-07-24
|
||||
|
||||
- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442]
|
||||
- Added `TestServer::client_headers` method. [#2097]
|
||||
- Update `actix-server` dependency to `2`.
|
||||
- Update `actix-tls` dependency to `3`.
|
||||
- Update `bytes` to `1.0`. [#1813]
|
||||
- Minimum supported Rust version (MSRV) is now 1.57.
|
||||
|
||||
[#2442]: https://github.com/actix/actix-web/pull/2442
|
||||
[#2097]: https://github.com/actix/actix-web/pull/2097
|
||||
[#1813]: https://github.com/actix/actix-web/pull/1813
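
A rough sketch of exercising the now-async `stop` in a test; the trivial handler and request path are placeholders, and it assumes `actix-http`, `actix-rt`, and this crate are available as dev-dependencies:

```rust
use std::convert::Infallible;

use actix_http::{HttpService, Request, Response};
use actix_http_test::test_server;

#[actix_rt::test]
async fn starts_and_stops_cleanly() {
    let srv = test_server(|| {
        HttpService::build()
            .h1(|_req: Request| async { Ok::<_, Infallible>(Response::ok()) })
            .tcp()
    })
    .await;

    let res = srv.get("/").send().await.unwrap();
    assert!(res.status().is_success());

    // `stop` is async since 3.0.0 and resolves once the server and system shut down
    srv.stop().await;
}
```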
|
||||
|
||||
<details>
|
||||
<summary>3.0.0 Pre-Releases</summary>
|
||||
|
||||
## 3.0.0-beta.13 - 2022-02-16
|
||||
|
||||
- No significant changes since `3.0.0-beta.12`.
|
||||
|
||||
## 3.0.0-beta.12 - 2022-01-31
|
||||
|
||||
- No significant changes since `3.0.0-beta.11`.
|
||||
|
||||
## 3.0.0-beta.11 - 2022-01-04
|
||||
|
||||
- Minimum supported Rust version (MSRV) is now 1.54.
|
||||
|
||||
|
||||
## 3.0.0-beta.10 - 2021-12-27
|
||||
|
||||
- Update `actix-server` to `2.0.0-rc.2`. [#2550]
|
||||
|
||||
[#2550]: https://github.com/actix/actix-web/pull/2550
|
||||
|
||||
|
||||
## 3.0.0-beta.9 - 2021-12-11
|
||||
|
||||
- No significant changes since `3.0.0-beta.8`.
|
||||
|
||||
|
||||
## 3.0.0-beta.8 - 2021-11-30
|
||||
|
||||
- Update `actix-tls` to `3.0.0-rc.1`. [#2474]
|
||||
|
||||
[#2474]: https://github.com/actix/actix-web/pull/2474
|
||||
|
||||
|
||||
## 3.0.0-beta.7 - 2021-11-22
|
||||
|
||||
- Fix compatibility with experimental `io-uring` feature of `actix-rt`. [#2408]
|
||||
|
||||
[#2408]: https://github.com/actix/actix-web/pull/2408
|
||||
|
||||
|
||||
## 3.0.0-beta.6 - 2021-11-15
|
||||
|
||||
- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442]
|
||||
- Update `actix-server` to `2.0.0-beta.9`. [#2442]
|
||||
- Minimum supported Rust version (MSRV) is now 1.52.
|
||||
|
||||
[#2442]: https://github.com/actix/actix-web/pull/2442
|
||||
|
||||
|
||||
## 3.0.0-beta.5 - 2021-09-09
|
||||
|
||||
- Minimum supported Rust version (MSRV) is now 1.51.
|
||||
|
||||
|
||||
## 3.0.0-beta.4 - 2021-04-02
|
||||
|
||||
- Added `TestServer::client_headers` method. [#2097]
|
||||
|
||||
[#2097]: https://github.com/actix/actix-web/pull/2097
|
||||
|
||||
|
||||
## 3.0.0-beta.3 - 2021-03-09
|
||||
- No notable changes.
|
||||
|
||||
- No notable changes.
|
||||
|
||||
## 3.0.0-beta.2 - 2021-02-10
|
||||
|
||||
- No notable changes.
|
||||
|
||||
|
||||
## 3.0.0-beta.1 - 2021-01-07
|
||||
|
||||
- Update `bytes` to `1.0`. [#1813]
|
||||
|
||||
[#1813]: https://github.com/actix/actix-web/pull/1813
|
||||
|
||||
</details>
|
||||
|
||||
## 2.1.0 - 2020-11-25
|
||||
|
||||
- Add ability to set address for `TestServer`. [#1645]
|
||||
- Upgrade `base64` to `0.13`.
|
||||
- Upgrade `serde_urlencoded` to `0.7`. [#1773]
|
||||
@ -69,12 +99,12 @@
|
||||
[#1773]: https://github.com/actix/actix-web/pull/1773
|
||||
[#1645]: https://github.com/actix/actix-web/pull/1645
|
||||
|
||||
|
||||
## 2.0.0 - 2020-09-11
|
||||
|
||||
- Update actix-codec and actix-utils dependencies.
|
||||
|
||||
|
||||
## 2.0.0-alpha.1 - 2020-05-23
|
||||
|
||||
- Update the `time` dependency to 0.2.7
|
||||
- Update `actix-connect` dependency to 2.0.0-alpha.2
|
||||
- Make `test_server` `async` fn.
|
||||
@ -84,55 +114,56 @@
|
||||
- Update `env_logger` dependency to 0.7
|
||||
|
||||
## 1.0.0 - 2019-12-13
|
||||
|
||||
- Replaced `TestServer::start()` with `test_server()`
|
||||
|
||||
|
||||
## 1.0.0-alpha.3 - 2019-12-07
|
||||
|
||||
- Migrate to `std::future`
|
||||
|
||||
|
||||
## 0.2.5 - 2019-09-17
|
||||
|
||||
- Update serde_urlencoded to "0.6.1"
|
||||
- Increase TestServerRuntime timeouts from 500ms to 3000ms
|
||||
- Do not override current `System`
|
||||
|
||||
|
||||
## 0.2.4 - 2019-07-18
|
||||
|
||||
- Update actix-server to 0.6
|
||||
|
||||
|
||||
## 0.2.3 - 2019-07-16
|
||||
|
||||
- Add `delete`, `options`, `patch` methods to `TestServerRunner`
|
||||
|
||||
|
||||
## 0.2.2 - 2019-06-16
|
||||
|
||||
- Add .put() and .sput() methods
|
||||
|
||||
|
||||
## 0.2.1 - 2019-06-05
|
||||
|
||||
- Add license files
|
||||
|
||||
|
||||
## 0.2.0 - 2019-05-12
|
||||
|
||||
- Update awc and actix-http deps
|
||||
|
||||
|
||||
## 0.1.1 - 2019-04-24
|
||||
|
||||
- Always make new connection for http client
|
||||
|
||||
|
||||
## 0.1.0 - 2019-04-16
|
||||
|
||||
- No changes
|
||||
|
||||
|
||||
## 0.1.0-alpha.3 - 2019-04-02
|
||||
|
||||
- Request functions accept path #743
|
||||
|
||||
|
||||
## 0.1.0-alpha.2 - 2019-03-29
|
||||
|
||||
- Added TestServerRuntime::load_body() method
|
||||
- Update actix-http and awc libraries
|
||||
|
||||
|
||||
## 0.1.0-alpha.1 - 2019-03-28
|
||||
|
||||
- Initial impl
|
||||
|
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "actix-http-test"
|
||||
version = "3.0.0-beta.11"
|
||||
version = "3.1.0"
|
||||
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
|
||||
description = "Various helpers for Actix applications to use during testing"
|
||||
keywords = ["http", "web", "framework", "async", "futures"]
|
||||
@ -29,17 +29,16 @@ default = []
|
||||
openssl = ["tls-openssl", "awc/openssl"]
|
||||
|
||||
[dependencies]
|
||||
actix-service = "2.0.0"
|
||||
actix-codec = "0.4.1"
|
||||
actix-tls = "3.0.0"
|
||||
actix-utils = "3.0.0"
|
||||
actix-service = "2"
|
||||
actix-codec = "0.5"
|
||||
actix-tls = "3"
|
||||
actix-utils = "3"
|
||||
actix-rt = "2.2"
|
||||
actix-server = "2"
|
||||
awc = { version = "3.0.0-beta.19", default-features = false }
|
||||
awc = { version = "3", default-features = false }
|
||||
|
||||
base64 = "0.13"
|
||||
bytes = "1"
|
||||
futures-core = { version = "0.3.7", default-features = false }
|
||||
futures-core = { version = "0.3.17", default-features = false }
|
||||
http = "0.2.5"
|
||||
log = "0.4"
|
||||
socket2 = "0.4"
|
||||
@ -48,8 +47,8 @@ serde_json = "1.0"
|
||||
slab = "0.4"
|
||||
serde_urlencoded = "0.7"
|
||||
tls-openssl = { version = "0.10.9", package = "openssl", optional = true }
|
||||
tokio = { version = "1.8.4", features = ["sync"] }
|
||||
tokio = { version = "1.24.2", features = ["sync"] }
|
||||
|
||||
[dev-dependencies]
|
||||
actix-web = { version = "4.0.0-beta.21", default-features = false, features = ["cookies"] }
|
||||
actix-http = "3.0.0-beta.19"
|
||||
actix-web = { version = "4", default-features = false, features = ["cookies"] }
|
||||
actix-http = "3"
|
||||
|
@ -3,15 +3,15 @@
|
||||
> Various helpers for Actix applications to use during testing.
|
||||
|
||||
[](https://crates.io/crates/actix-http-test)
|
||||
[](https://docs.rs/actix-http-test/3.0.0-beta.11)
|
||||
[](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
|
||||
[](https://docs.rs/actix-http-test/3.1.0)
|
||||

|
||||

|
||||
<br>
|
||||
[](https://deps.rs/crate/actix-http-test/3.0.0-beta.11)
|
||||
[](https://deps.rs/crate/actix-http-test/3.1.0)
|
||||
[](https://crates.io/crates/actix-http-test)
|
||||
[](https://discord.gg/NWpN5mmg3x)
|
||||
|
||||
## Documentation & Resources
|
||||
|
||||
- [API Documentation](https://docs.rs/actix-http-test)
|
||||
- Minimum Supported Rust Version (MSRV): 1.54
|
||||
- Minimum Supported Rust Version (MSRV): 1.59
|
||||
|
@ -2,6 +2,7 @@
|
||||
|
||||
#![deny(rust_2018_idioms, nonstandard_style)]
|
||||
#![warn(future_incompatible)]
|
||||
#![allow(clippy::uninlined_format_args)]
|
||||
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
|
||||
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
|
||||
|
||||
@ -87,6 +88,7 @@ pub async fn test_server_with_addr<F: ServerServiceFactory<TcpStream>>(
|
||||
|
||||
// notify TestServer that server and system have shut down
|
||||
// all thread managed resources should be dropped at this point
|
||||
#[allow(clippy::let_underscore_future)]
|
||||
let _ = thread_stop_tx.send(());
|
||||
});
|
||||
|
||||
@ -294,6 +296,7 @@ impl Drop for TestServer {
|
||||
// without needing to await anything
|
||||
|
||||
// signal server to stop
|
||||
#[allow(clippy::let_underscore_future)]
|
||||
let _ = self.server.stop(true);
|
||||
|
||||
// signal system to stop
|
||||
|
File diff suppressed because it is too large
@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "actix-http"
|
||||
version = "3.0.0-beta.19"
|
||||
version = "3.3.0"
|
||||
authors = [
|
||||
"Nikolay Kim <fafhrd91@gmail.com>",
|
||||
"Rob Ede <robjtede@icloud.com>",
|
||||
@ -20,7 +20,7 @@ edition = "2018"
|
||||
|
||||
[package.metadata.docs.rs]
|
||||
# features that docs.rs will build with
|
||||
features = ["openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd"]
|
||||
features = ["http2", "ws", "openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd"]
|
||||
|
||||
[lib]
|
||||
name = "actix_http"
|
||||
@ -29,82 +29,101 @@ path = "src/lib.rs"
|
||||
[features]
|
||||
default = []
|
||||
|
||||
# openssl
|
||||
# HTTP/2 protocol support
|
||||
http2 = ["h2"]
|
||||
|
||||
# WebSocket protocol implementation
|
||||
ws = [
|
||||
"local-channel",
|
||||
"base64",
|
||||
"rand",
|
||||
"sha1",
|
||||
]
|
||||
|
||||
# TLS via OpenSSL
|
||||
openssl = ["actix-tls/accept", "actix-tls/openssl"]
|
||||
|
||||
# rustls support
|
||||
# TLS via Rustls
|
||||
rustls = ["actix-tls/accept", "actix-tls/rustls"]
|
||||
|
||||
# enable compression support
|
||||
compress-brotli = ["brotli", "__compress"]
|
||||
compress-gzip = ["flate2", "__compress"]
|
||||
compress-zstd = ["zstd", "__compress"]
|
||||
# Compression codecs
|
||||
compress-brotli = ["__compress", "brotli"]
|
||||
compress-gzip = ["__compress", "flate2"]
|
||||
compress-zstd = ["__compress", "zstd"]
|
||||
|
||||
# Internal (PRIVATE!) features used to aid testing and checking feature status.
|
||||
# Don't rely on these whatsoever. They may disappear at anytime.
|
||||
# Don't rely on these whatsoever. They are semver-exempt and may disappear at anytime.
|
||||
__compress = []
|
||||
|
||||
[dependencies]
|
||||
actix-service = "2.0.0"
|
||||
actix-codec = "0.4.1"
|
||||
actix-utils = "3.0.0"
|
||||
actix-service = "2"
|
||||
actix-codec = "0.5"
|
||||
actix-utils = "3"
|
||||
actix-rt = { version = "2.2", default-features = false }
|
||||
|
||||
ahash = "0.7"
|
||||
base64 = "0.13"
|
||||
ahash = "0.8"
|
||||
bitflags = "1.2"
|
||||
bytes = "1"
|
||||
bytestring = "1"
|
||||
derive_more = "0.99.5"
|
||||
encoding_rs = "0.8"
|
||||
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
|
||||
h2 = "0.3.9"
|
||||
futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] }
|
||||
http = "0.2.5"
|
||||
httparse = "1.5.1"
|
||||
httpdate = "1.0.1"
|
||||
itoa = "1"
|
||||
language-tags = "0.3"
|
||||
local-channel = "0.1"
|
||||
log = "0.4"
|
||||
mime = "0.3"
|
||||
percent-encoding = "2.1"
|
||||
pin-project-lite = "0.2"
|
||||
rand = "0.8"
|
||||
sha-1 = "0.10"
|
||||
smallvec = "1.6.1"
|
||||
tokio = { version = "1.24.2", features = [] }
|
||||
tokio-util = { version = "0.7", features = ["io", "codec"] }
|
||||
tracing = { version = "0.1.30", default-features = false, features = ["log"] }
|
||||
|
||||
# tls
|
||||
actix-tls = { version = "3.0.0", default-features = false, optional = true }
|
||||
# http2
|
||||
h2 = { version = "0.3.9", optional = true }
|
||||
|
||||
# compression
|
||||
# websockets
|
||||
local-channel = { version = "0.1", optional = true }
|
||||
base64 = { version = "0.21", optional = true }
|
||||
rand = { version = "0.8", optional = true }
|
||||
sha1 = { version = "0.10", optional = true }
|
||||
|
||||
# openssl/rustls
|
||||
actix-tls = { version = "3", default-features = false, optional = true }
|
||||
|
||||
# compress-*
|
||||
brotli = { version = "3.3.3", optional = true }
|
||||
flate2 = { version = "1.0.13", optional = true }
|
||||
zstd = { version = "0.9", optional = true }
|
||||
zstd = { version = "0.12", optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
actix-http-test = { version = "3.0.0-beta.11", features = ["openssl"] }
|
||||
actix-http-test = { version = "3", features = ["openssl"] }
|
||||
actix-server = "2"
|
||||
actix-tls = { version = "3.0.0", features = ["openssl"] }
|
||||
actix-web = "4.0.0-beta.21"
|
||||
actix-tls = { version = "3", features = ["openssl"] }
|
||||
actix-web = "4"
|
||||
|
||||
async-stream = "0.3"
|
||||
criterion = { version = "0.3", features = ["html_reports"] }
|
||||
criterion = { version = "0.4", features = ["html_reports"] }
|
||||
env_logger = "0.9"
|
||||
futures-util = { version = "0.3.7", default-features = false, features = ["alloc"] }
|
||||
futures-util = { version = "0.3.17", default-features = false, features = ["alloc"] }
|
||||
memchr = "2.4"
|
||||
rcgen = "0.8"
|
||||
once_cell = "1.9"
|
||||
rcgen = "0.9"
|
||||
regex = "1.3"
|
||||
rustls-pemfile = "0.2"
|
||||
rustversion = "1"
|
||||
rustls-pemfile = "1"
|
||||
serde = { version = "1.0", features = ["derive"] }
|
||||
serde_json = "1.0"
|
||||
static_assertions = "1"
|
||||
tls-openssl = { package = "openssl", version = "0.10.9" }
|
||||
tls-rustls = { package = "rustls", version = "0.20.0" }
|
||||
tokio = { version = "1.8.4", features = ["net", "rt", "macros"] }
|
||||
tokio = { version = "1.24.2", features = ["net", "rt", "macros"] }
|
||||
|
||||
[[example]]
|
||||
name = "ws"
|
||||
required-features = ["rustls"]
|
||||
required-features = ["ws", "rustls"]
|
||||
|
||||
[[bench]]
|
||||
name = "write-camel-case"
|
||||
|
@ -3,18 +3,18 @@
|
||||
> HTTP primitives for the Actix ecosystem.
|
||||
|
||||
[](https://crates.io/crates/actix-http)
|
||||
[](https://docs.rs/actix-http/3.0.0-beta.19)
|
||||
[](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
|
||||
[](https://docs.rs/actix-http/3.3.0)
|
||||

|
||||

|
||||
<br />
|
||||
[](https://deps.rs/crate/actix-http/3.0.0-beta.19)
|
||||
[](https://deps.rs/crate/actix-http/3.3.0)
|
||||
[](https://crates.io/crates/actix-http)
|
||||
[](https://discord.gg/NWpN5mmg3x)
|
||||
|
||||
## Documentation & Resources
|
||||
|
||||
- [API Documentation](https://docs.rs/actix-http)
|
||||
- Minimum Supported Rust Version (MSRV): 1.54
|
||||
- Minimum Supported Rust Version (MSRV): 1.59
|
||||
|
||||
## Example
|
||||
|
||||
@ -25,7 +25,7 @@ use actix_http::{HttpService, Response};
|
||||
use actix_server::Server;
|
||||
use futures_util::future;
|
||||
use http::header::HeaderValue;
|
||||
use log::info;
|
||||
use tracing::info;
|
||||
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
@ -49,18 +49,3 @@ async fn main() -> io::Result<()> {
|
||||
.await
|
||||
}
|
||||
```
|
||||
|
||||
## License
|
||||
|
||||
This project is licensed under either of
|
||||
|
||||
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
|
||||
- MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
|
||||
|
||||
at your option.
|
||||
|
||||
## Code of Conduct
|
||||
|
||||
Contribution to the actix-http crate is organized under the terms of the Contributor Covenant. The maintainer of actix-http, @fafhrd91, promises to intervene to uphold that code of conduct.
|
||||
|
@ -1,3 +1,5 @@
|
||||
#![allow(clippy::uninlined_format_args)]
|
||||
|
||||
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
|
||||
|
||||
const CODES: &[u16] = &[0, 1000, 201, 800, 550];
|
||||
|
@ -114,11 +114,12 @@ mod _original {
|
||||
use std::mem::MaybeUninit;
|
||||
|
||||
pub fn parse_headers(src: &mut BytesMut) -> usize {
|
||||
#![allow(clippy::uninit_assumed_init)]
|
||||
#![allow(invalid_value, clippy::uninit_assumed_init)]
|
||||
|
||||
let mut headers: [HeaderIndex; MAX_HEADERS] =
|
||||
unsafe { MaybeUninit::uninit().assume_init() };
|
||||
|
||||
#[allow(invalid_value)]
|
||||
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] =
|
||||
unsafe { MaybeUninit::uninit().assume_init() };
|
||||
|
||||
|
@ -18,7 +18,8 @@ async fn main() -> std::io::Result<()> {
|
||||
HttpService::build()
|
||||
// pass the app to service builder
|
||||
// map_config is used to map App's configuration to ServiceBuilder
|
||||
.finish(map_config(app, |_| AppConfig::default()))
|
||||
// h1 will configure server to only use HTTP/1.1
|
||||
.h1(map_config(app, |_| AppConfig::default()))
|
||||
.tcp()
|
||||
})?
|
||||
.run()
|
||||
|
actix-http/examples/bench.rs (new file)
@ -0,0 +1,27 @@
|
||||
use std::{convert::Infallible, io, time::Duration};
|
||||
|
||||
use actix_http::{HttpService, Request, Response, StatusCode};
|
||||
use actix_server::Server;
|
||||
use once_cell::sync::Lazy;
|
||||
|
||||
static STR: Lazy<String> = Lazy::new(|| "HELLO WORLD ".repeat(20));
|
||||
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
|
||||
|
||||
Server::build()
|
||||
.bind("dispatcher-benchmark", ("127.0.0.1", 8080), || {
|
||||
HttpService::build()
|
||||
.client_request_timeout(Duration::from_secs(1))
|
||||
.finish(|_: Request| async move {
|
||||
let mut res = Response::build(StatusCode::OK);
|
||||
Ok::<_, Infallible>(res.body(&**STR))
|
||||
})
|
||||
.tcp()
|
||||
})?
|
||||
// limiting number of workers so that bench client is not sharing as many resources
|
||||
.workers(4)
|
||||
.run()
|
||||
.await
|
||||
}
|
@ -1,10 +1,11 @@
|
||||
use std::io;
|
||||
use std::{io, time::Duration};
|
||||
|
||||
use actix_http::{Error, HttpService, Request, Response, StatusCode};
|
||||
use actix_server::Server;
|
||||
use bytes::BytesMut;
|
||||
use futures_util::StreamExt as _;
|
||||
use http::header::HeaderValue;
|
||||
use tracing::info;
|
||||
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
@ -13,8 +14,8 @@ async fn main() -> io::Result<()> {
|
||||
Server::build()
|
||||
.bind("echo", ("127.0.0.1", 8080), || {
|
||||
HttpService::build()
|
||||
.client_timeout(1000)
|
||||
.client_disconnect(1000)
|
||||
.client_request_timeout(Duration::from_secs(1))
|
||||
.client_disconnect_timeout(Duration::from_secs(1))
|
||||
// handles HTTP/1.1 and HTTP/2
|
||||
.finish(|mut req: Request| async move {
|
||||
let mut body = BytesMut::new();
|
||||
@ -22,7 +23,7 @@ async fn main() -> io::Result<()> {
|
||||
body.extend_from_slice(&item?);
|
||||
}
|
||||
|
||||
log::info!("request body: {:?}", body);
|
||||
info!("request body: {:?}", body);
|
||||
|
||||
let res = Response::build(StatusCode::OK)
|
||||
.insert_header(("x-head", HeaderValue::from_static("dummy value!")))
|
||||
|
actix-http/examples/h2c-detect.rs (new file)
@ -0,0 +1,29 @@
|
||||
//! An example that supports automatic selection of plaintext h1/h2c connections.
|
||||
//!
|
||||
//! Notably, both the following commands will work.
|
||||
//! ```console
|
||||
//! $ curl --http1.1 'http://localhost:8080/'
|
||||
//! $ curl --http2-prior-knowledge 'http://localhost:8080/'
|
||||
//! ```
|
||||
|
||||
use std::{convert::Infallible, io};
|
||||
|
||||
use actix_http::{HttpService, Request, Response, StatusCode};
|
||||
use actix_server::Server;
|
||||
|
||||
#[tokio::main(flavor = "current_thread")]
|
||||
async fn main() -> io::Result<()> {
|
||||
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
|
||||
|
||||
Server::build()
|
||||
.bind("h2c-detect", ("127.0.0.1", 8080), || {
|
||||
HttpService::build()
|
||||
.finish(|_req: Request| async move {
|
||||
Ok::<_, Infallible>(Response::build(StatusCode::OK).body("Hello!"))
|
||||
})
|
||||
.tcp_auto_h2c()
|
||||
})?
|
||||
.workers(2)
|
||||
.run()
|
||||
.await
|
||||
}
|
actix-http/examples/h2spec.rs (new file)
@ -0,0 +1,25 @@
|
||||
use std::{convert::Infallible, io};
|
||||
|
||||
use actix_http::{HttpService, Request, Response, StatusCode};
|
||||
use actix_server::Server;
|
||||
use once_cell::sync::Lazy;
|
||||
|
||||
static STR: Lazy<String> = Lazy::new(|| "HELLO WORLD ".repeat(100));
|
||||
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
|
||||
|
||||
Server::build()
|
||||
.bind("h2spec", ("127.0.0.1", 8080), || {
|
||||
HttpService::build()
|
||||
.h2(|_: Request| async move {
|
||||
let mut res = Response::build(StatusCode::OK);
|
||||
Ok::<_, Infallible>(res.body(&**STR))
|
||||
})
|
||||
.tcp()
|
||||
})?
|
||||
.workers(4)
|
||||
.run()
|
||||
.await
|
||||
}
|
@ -1,9 +1,8 @@
|
||||
use std::{convert::Infallible, io};
|
||||
use std::{convert::Infallible, io, time::Duration};
|
||||
|
||||
use actix_http::{
|
||||
header::HeaderValue, HttpMessage, HttpService, Request, Response, StatusCode,
|
||||
};
|
||||
use actix_http::{header::HeaderValue, HttpService, Request, Response, StatusCode};
|
||||
use actix_server::Server;
|
||||
use tracing::info;
|
||||
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
@ -12,18 +11,18 @@ async fn main() -> io::Result<()> {
|
||||
Server::build()
|
||||
.bind("hello-world", ("127.0.0.1", 8080), || {
|
||||
HttpService::build()
|
||||
.client_timeout(1000)
|
||||
.client_disconnect(1000)
|
||||
.client_request_timeout(Duration::from_secs(1))
|
||||
.client_disconnect_timeout(Duration::from_secs(1))
|
||||
.on_connect_ext(|_, ext| {
|
||||
ext.insert(42u32);
|
||||
})
|
||||
.finish(|req: Request| async move {
|
||||
log::info!("{:?}", req);
|
||||
info!("{:?}", req);
|
||||
|
||||
let mut res = Response::build(StatusCode::OK);
|
||||
res.insert_header(("x-head", HeaderValue::from_static("dummy value!")));
|
||||
|
||||
let forty_two = req.extensions().get::<u32>().unwrap().to_string();
|
||||
let forty_two = req.conn_data::<u32>().unwrap().to_string();
|
||||
res.insert_header((
|
||||
"x-forty-two",
|
||||
HeaderValue::from_str(&forty_two).unwrap(),
|
||||
|
@ -12,6 +12,7 @@ use actix_http::{body::BodyStream, HttpService, Response};
|
||||
use actix_server::Server;
|
||||
use async_stream::stream;
|
||||
use bytes::Bytes;
|
||||
use tracing::info;
|
||||
|
||||
#[actix_rt::main]
|
||||
async fn main() -> io::Result<()> {
|
||||
@ -21,7 +22,7 @@ async fn main() -> io::Result<()> {
|
||||
.bind("streaming-error", ("127.0.0.1", 8080), || {
|
||||
HttpService::build()
|
||||
.finish(|req| async move {
|
||||
log::info!("{:?}", req);
|
||||
info!("{:?}", req);
|
||||
let res = Response::ok();
|
||||
|
||||
Ok::<_, Infallible>(res.set_body(BodyStream::new(stream! {
|
||||
|
@@ -10,13 +10,14 @@ use std::{
    time::Duration,
};

use actix_codec::Encoder;
use actix_http::{body::BodyStream, error::Error, ws, HttpService, Request, Response};
use actix_rt::time::{interval, Interval};
use actix_server::Server;
use bytes::{Bytes, BytesMut};
use bytestring::ByteString;
use futures_core::{ready, Stream};
use tokio_util::codec::Encoder;
use tracing::{info, trace};

#[actix_rt::main]
async fn main() -> io::Result<()> {

@@ -34,13 +35,13 @@ async fn main() -> io::Result<()> {
}

async fn handler(req: Request) -> Result<Response<BodyStream<Heartbeat>>, Error> {
    log::info!("handshaking");
    info!("handshaking");
    let mut res = ws::handshake(req.head())?;

    // handshake will always fail under HTTP/2

    log::info!("responding");
    Ok(res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new())))?)
    info!("responding");
    res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new())))
}

struct Heartbeat {

@@ -61,7 +62,7 @@ impl Stream for Heartbeat {
    type Item = Result<Bytes, Error>;

    fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        log::trace!("poll");
        trace!("poll");

        ready!(self.as_mut().interval.poll_tick(cx));
@@ -80,7 +80,7 @@ mod tests {
    use futures_core::ready;
    use futures_util::{stream, FutureExt as _};
    use pin_project_lite::pin_project;
    use static_assertions::{assert_impl_all, assert_not_impl_all};
    use static_assertions::{assert_impl_all, assert_not_impl_any};

    use super::*;
    use crate::body::to_bytes;

@@ -91,10 +91,10 @@ mod tests {
    assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
    assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);

    assert_not_impl_all!(BodyStream<stream::Empty<Bytes>>: MessageBody);
    assert_not_impl_all!(BodyStream<stream::Repeat<Bytes>>: MessageBody);
    assert_not_impl_any!(BodyStream<stream::Empty<Bytes>>: MessageBody);
    assert_not_impl_any!(BodyStream<stream::Repeat<Bytes>>: MessageBody);
    // crate::Error is not Clone
    assert_not_impl_all!(BodyStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
    assert_not_impl_any!(BodyStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);

    #[actix_rt::test]
    async fn skips_empty_chunks() {
@@ -31,7 +31,7 @@ impl fmt::Debug for BoxBodyInner {
}

impl BoxBody {
    /// Same as `MessageBody::boxed`.
    /// Boxes body type, erasing type information.
    ///
    /// If the body type to wrap is unknown or generic it is better to use [`MessageBody::boxed`] to
    /// avoid double boxing.

@@ -105,14 +105,13 @@ impl MessageBody for BoxBody {
#[cfg(test)]
mod tests {

    use static_assertions::{assert_impl_all, assert_not_impl_all};
    use static_assertions::{assert_impl_all, assert_not_impl_any};

    use super::*;
    use crate::body::to_bytes;

    assert_impl_all!(BoxBody: MessageBody, fmt::Debug, Unpin);

    assert_not_impl_all!(BoxBody: Send, Sync, Unpin);
    assert_impl_all!(BoxBody: fmt::Debug, MessageBody, Unpin);
    assert_not_impl_any!(BoxBody: Send, Sync);

    #[actix_rt::test]
    async fn nested_boxed_body() {
@@ -10,6 +10,17 @@ use super::{BodySize, BoxBody, MessageBody};
use crate::Error;

pin_project! {
    /// An "either" type specialized for body types.
    ///
    /// It is common, in middleware especially, to conditionally return an inner service's unknown/
    /// generic body `B` type or return early with a new response. This type's "right" variant
    /// defaults to `BoxBody` since error responses are the common case.
    ///
    /// For example, middleware will often have `type Response = ServiceResponse<EitherBody<B>>`.
    /// This means that the inner service's response body type maps to the `Left` variant and the
    /// middleware's own error responses use the default `Right` variant of `BoxBody`. Of course,
    /// there's no reason it couldn't use `EitherBody<B, String>` instead if its alternative
    /// responses have a known type.
    #[project = EitherBodyProj]
    #[derive(Debug, Clone)]
    pub enum EitherBody<L, R = BoxBody> {

@@ -22,7 +33,10 @@ pin_project! {
}

impl<L> EitherBody<L, BoxBody> {
    /// Creates new `EitherBody` using left variant and boxed right variant.
    /// Creates new `EitherBody` left variant with a boxed right variant.
    ///
    /// If the expected `R` type will be inferred and is not `BoxBody` then use the
    /// [`left`](Self::left) constructor instead.
    #[inline]
    pub fn new(body: L) -> Self {
        Self::Left { body }
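Reviewer note: a minimal sketch (not part of this diff) of how the `Left`/`Right` split described above is typically used. It assumes a `right` constructor paired with the documented `left`, plus `BoxBody::new` as referenced later in this changeset.

```rust
use actix_http::body::{BoxBody, EitherBody};

// Inner service's body maps to `Left`; early/error responses use the boxed `Right` default.
fn respond(use_inner: bool, inner: String) -> EitherBody<String> {
    if use_inner {
        EitherBody::new(inner)
    } else {
        // `right` is assumed here as the counterpart of the documented `left` constructor.
        EitherBody::right(BoxBody::new("an error occurred"))
    }
}
```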
@@ -14,8 +14,44 @@ use pin_project_lite::pin_project;

use super::{BodySize, BoxBody};

/// An interface types that can converted to bytes and used as response bodies.
// TODO: examples
/// An interface for types that can be used as a response body.
///
/// It is not usually necessary to create custom body types, this trait is already [implemented for
/// a large number of sensible body types](#foreign-impls) including:
/// - Empty body: `()`
/// - Text-based: `String`, `&'static str`, [`ByteString`](https://docs.rs/bytestring/1).
/// - Byte-based: `Bytes`, `BytesMut`, `Vec<u8>`, `&'static [u8]`;
/// - Streams: [`BodyStream`](super::BodyStream), [`SizedStream`](super::SizedStream)
///
/// # Examples
/// ```
/// # use std::convert::Infallible;
/// # use std::task::{Poll, Context};
/// # use std::pin::Pin;
/// # use bytes::Bytes;
/// # use actix_http::body::{BodySize, MessageBody};
/// struct Repeat {
///     chunk: String,
///     n_times: usize,
/// }
///
/// impl MessageBody for Repeat {
///     type Error = Infallible;
///
///     fn size(&self) -> BodySize {
///         BodySize::Sized((self.chunk.len() * self.n_times) as u64)
///     }
///
///     fn poll_next(
///         self: Pin<&mut Self>,
///         _cx: &mut Context<'_>,
///     ) -> Poll<Option<Result<Bytes, Self::Error>>> {
///         let payload_string = self.chunk.repeat(self.n_times);
///         let payload_bytes = Bytes::from(payload_string);
///         Poll::Ready(Some(Ok(payload_bytes)))
///     }
/// }
/// ```
pub trait MessageBody {
    /// The type of error that will be returned if streaming body fails.
    ///
@@ -29,7 +65,22 @@ pub trait MessageBody {
    fn size(&self) -> BodySize;

    /// Attempt to pull out the next chunk of body bytes.
    // TODO: expand documentation
    ///
    /// # Return Value
    /// Similar to the `Stream` interface, there are several possible return values, each indicating
    /// a distinct state:
    /// - `Poll::Pending` means that this body's next chunk is not ready yet. Implementations must
    ///   ensure that the current task will be notified when the next chunk may be ready.
    /// - `Poll::Ready(Some(val))` means that the body has successfully produced a chunk, `val`,
    ///   and may produce further values on subsequent `poll_next` calls.
    /// - `Poll::Ready(None)` means that the body is complete, and `poll_next` should not be
    ///   invoked again.
    ///
    /// # Panics
    /// Once a body is complete (i.e., `poll_next` returned `Ready(None)`), calling its `poll_next`
    /// method again may panic, block forever, or cause other kinds of problems; this trait places
    /// no requirements on the effects of such a call. However, as the `poll_next` method is not
    /// marked unsafe, Rust’s usual rules apply: calls must never cause UB, regardless of its state.
    fn poll_next(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,

@@ -37,7 +88,7 @@ pub trait MessageBody {

    /// Try to convert into the complete chunk of body bytes.
    ///
    /// Implement this method if the entire body can be trivially extracted. This is useful for
    /// Override this method if the complete body can be trivially extracted. This is useful for
    /// optimizations where `poll_next` calls can be avoided.
    ///
    /// Body types with [`BodySize::None`] are allowed to return empty `Bytes`. Although, if calling

@@ -54,7 +105,11 @@ pub trait MessageBody {
        Err(self)
    }

    /// Converts this body into `BoxBody`.
    /// Wraps this body into a `BoxBody`.
    ///
    /// No-op when called on a `BoxBody`, meaning there is no risk of double boxing when calling
    /// this on a generic `MessageBody`. Prefer this over [`BoxBody::new`] when a boxed body
    /// is required.
    #[inline]
    fn boxed(self) -> BoxBody
    where

@@ -65,8 +120,28 @@ pub trait MessageBody {
}

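Reviewer note: a small, hand-written body (a sketch, not from this diff) that honors the `poll_next` contract documented above: it yields one chunk and then returns `Poll::Ready(None)`.

```rust
use std::{
    convert::Infallible,
    pin::Pin,
    task::{Context, Poll},
};

use actix_http::body::{BodySize, MessageBody};
use bytes::Bytes;

/// Yields its chunk exactly once, then reports completion with `Ready(None)`.
struct Once(Option<Bytes>);

impl MessageBody for Once {
    type Error = Infallible;

    fn size(&self) -> BodySize {
        BodySize::Sized(self.0.as_ref().map_or(0, |b| b.len() as u64))
    }

    fn poll_next(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Bytes, Self::Error>>> {
        // `Some(Ok(chunk))` on the first call, `None` on every call after that.
        Poll::Ready(self.get_mut().0.take().map(Ok))
    }
}
```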
mod foreign_impls {
    use std::{borrow::Cow, ops::DerefMut};

    use super::*;

    impl<B> MessageBody for &mut B
    where
        B: MessageBody + Unpin + ?Sized,
    {
        type Error = B::Error;

        fn size(&self) -> BodySize {
            (**self).size()
        }

        fn poll_next(
            mut self: Pin<&mut Self>,
            cx: &mut Context<'_>,
        ) -> Poll<Option<Result<Bytes, Self::Error>>> {
            Pin::new(&mut **self).poll_next(cx)
        }
    }

    impl MessageBody for Infallible {
        type Error = Infallible;

@@ -124,8 +199,9 @@ mod foreign_impls {
        }
    }

    impl<B> MessageBody for Pin<Box<B>>
    impl<T, B> MessageBody for Pin<T>
    where
        T: DerefMut<Target = B> + Unpin,
        B: MessageBody + ?Sized,
    {
        type Error = B::Error;
@@ -248,6 +324,39 @@ mod foreign_impls {
        }
    }

    impl MessageBody for Cow<'static, [u8]> {
        type Error = Infallible;

        #[inline]
        fn size(&self) -> BodySize {
            BodySize::Sized(self.len() as u64)
        }

        #[inline]
        fn poll_next(
            self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
        ) -> Poll<Option<Result<Bytes, Self::Error>>> {
            if self.is_empty() {
                Poll::Ready(None)
            } else {
                let bytes = match mem::take(self.get_mut()) {
                    Cow::Borrowed(b) => Bytes::from_static(b),
                    Cow::Owned(b) => Bytes::from(b),
                };
                Poll::Ready(Some(Ok(bytes)))
            }
        }

        #[inline]
        fn try_into_bytes(self) -> Result<Bytes, Self> {
            match self {
                Cow::Borrowed(b) => Ok(Bytes::from_static(b)),
                Cow::Owned(b) => Ok(Bytes::from(b)),
            }
        }
    }

    impl MessageBody for &'static str {
        type Error = Infallible;

@@ -303,6 +412,39 @@ mod foreign_impls {
        }
    }

    impl MessageBody for Cow<'static, str> {
        type Error = Infallible;

        #[inline]
        fn size(&self) -> BodySize {
            BodySize::Sized(self.len() as u64)
        }

        #[inline]
        fn poll_next(
            self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
        ) -> Poll<Option<Result<Bytes, Self::Error>>> {
            if self.is_empty() {
                Poll::Ready(None)
            } else {
                let bytes = match mem::take(self.get_mut()) {
                    Cow::Borrowed(s) => Bytes::from_static(s.as_bytes()),
                    Cow::Owned(s) => Bytes::from(s.into_bytes()),
                };
                Poll::Ready(Some(Ok(bytes)))
            }
        }

        #[inline]
        fn try_into_bytes(self) -> Result<Bytes, Self> {
            match self {
                Cow::Borrowed(s) => Ok(Bytes::from_static(s.as_bytes())),
                Cow::Owned(s) => Ok(Bytes::from(s.into_bytes())),
            }
        }
    }

    impl MessageBody for bytestring::ByteString {
        type Error = Infallible;

@@ -390,6 +532,7 @@ mod tests {
    use actix_rt::pin;
    use actix_utils::future::poll_fn;
    use bytes::{Bytes, BytesMut};
    use futures_util::stream;

    use super::*;
    use crate::body::{self, EitherBody};
@@ -426,6 +569,35 @@ mod tests {
        assert_poll_next_none!(pl);
    }

    #[actix_rt::test]
    async fn mut_equivalence() {
        assert_eq!(().size(), BodySize::Sized(0));
        assert_eq!(().size(), (&(&mut ())).size());

        let pl = &mut ();
        pin!(pl);
        assert_poll_next_none!(pl);

        let pl = &mut Box::new(());
        pin!(pl);
        assert_poll_next_none!(pl);

        let mut body = body::SizedStream::new(
            8,
            stream::iter([
                Ok::<_, std::io::Error>(Bytes::from("1234")),
                Ok(Bytes::from("5678")),
            ]),
        );
        let body = &mut body;
        assert_eq!(body.size(), BodySize::Sized(8));
        pin!(body);
        assert_poll_next!(body, Bytes::from_static(b"1234"));
        assert_poll_next!(body, Bytes::from_static(b"5678"));
        assert_poll_next_none!(body);
    }

    #[allow(clippy::let_unit_value)]
    #[actix_rt::test]
    async fn test_unit() {
        let pl = ();

@@ -551,4 +723,18 @@ mod tests {
        let not_body = resp_body.downcast_ref::<()>();
        assert!(not_body.is_none());
    }

    #[actix_rt::test]
    async fn non_owning_to_bytes() {
        let mut body = BoxBody::new(());
        let bytes = body::to_bytes(&mut body).await.unwrap();
        assert_eq!(bytes, Bytes::new());

        let mut body = body::BodyStream::new(stream::iter([
            Ok::<_, std::io::Error>(Bytes::from("1234")),
            Ok(Bytes::from("5678")),
        ]));
        let bytes = body::to_bytes(&mut body).await.unwrap();
        assert_eq!(bytes, Bytes::from_static(b"12345678"));
    }
}
@@ -1,4 +1,9 @@
//! Traits and structures to aid consuming and writing HTTP payloads.
//!
//! "Body" and "payload" are used somewhat interchangeably in this documentation.

// Though the spec kinda reads like "payload" is the possibly-transfer-encoded part of the message
// and the "body" is the intended possibly-decoded version of that.

mod body_stream;
mod boxed;

@@ -10,9 +10,12 @@ use super::{BodySize, MessageBody};

/// Body type for responses that forbid payloads.
///
/// Distinct from an empty response which would contain a Content-Length header.
///
/// This is distinct from an "empty" response which _would_ contain a `Content-Length` header.
/// For an "empty" body, use `()` or `Bytes::new()`.
///
/// For example, the HTTP spec forbids a payload to be sent with a `204 No Content` response.
/// In this case, the payload (or lack thereof) is implicit from the status code, so a
/// `Content-Length` header is not required.
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct None;
@@ -44,7 +44,7 @@ where

    #[inline]
    fn size(&self) -> BodySize {
        BodySize::Sized(self.size as u64)
        BodySize::Sized(self.size)
    }

    /// Attempts to pull out the next value of the underlying [`Stream`].

@@ -76,7 +76,7 @@ mod tests {
    use actix_rt::pin;
    use actix_utils::future::poll_fn;
    use futures_util::stream;
    use static_assertions::{assert_impl_all, assert_not_impl_all};
    use static_assertions::{assert_impl_all, assert_not_impl_any};

    use super::*;
    use crate::body::to_bytes;

@@ -87,10 +87,10 @@ mod tests {
    assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
    assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);

    assert_not_impl_all!(SizedStream<stream::Empty<Bytes>>: MessageBody);
    assert_not_impl_all!(SizedStream<stream::Repeat<Bytes>>: MessageBody);
    assert_not_impl_any!(SizedStream<stream::Empty<Bytes>>: MessageBody);
    assert_not_impl_any!(SizedStream<stream::Repeat<Bytes>>: MessageBody);
    // crate::Error is not Clone
    assert_not_impl_all!(SizedStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
    assert_not_impl_any!(SizedStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);

    #[actix_rt::test]
    async fn skips_empty_chunks() {
@@ -42,7 +42,7 @@ pub async fn to_bytes<B: MessageBody>(body: B) -> Result<Bytes, B::Error> {
        let body = body.as_mut();

        match ready!(body.poll_next(cx)) {
            Some(Ok(bytes)) => buf.extend_from_slice(&*bytes),
            Some(Ok(bytes)) => buf.extend_from_slice(&bytes),
            None => return Poll::Ready(Ok(())),
            Some(Err(err)) => return Poll::Ready(Err(err)),
        }
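Reviewer note: the `non_owning_to_bytes` test earlier in this changeset relies on the `&mut B` impl shown above; here is a hedged usage sketch of the same pattern.

```rust
use actix_http::body::{self, BoxBody};
use bytes::Bytes;

#[actix_rt::main]
async fn main() {
    // `&mut BoxBody` implements `MessageBody`, so `to_bytes` can drain a borrowed body.
    let mut body = BoxBody::new("hello");
    let bytes = body::to_bytes(&mut body).await.unwrap();
    assert_eq!(bytes, Bytes::from_static(b"hello"));
}
```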
@@ -1,25 +1,22 @@
use std::{fmt, marker::PhantomData, net, rc::Rc};
use std::{fmt, marker::PhantomData, net, rc::Rc, time::Duration};

use actix_codec::Framed;
use actix_service::{IntoServiceFactory, Service, ServiceFactory};

use crate::{
    body::{BoxBody, MessageBody},
    config::{KeepAlive, ServiceConfig},
    h1::{self, ExpectHandler, H1Service, UpgradeHandler},
    h2::H2Service,
    service::HttpService,
    ConnectCallback, Extensions, Request, Response,
    ConnectCallback, Extensions, KeepAlive, Request, Response, ServiceConfig,
};

/// A HTTP service builder
/// An HTTP service builder.
///
/// This type can be used to construct an instance of [`HttpService`] through a
/// builder-like pattern.
/// This type can construct an instance of [`HttpService`] through a builder-like pattern.
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> {
    keep_alive: KeepAlive,
    client_timeout: u64,
    client_disconnect: u64,
    client_request_timeout: Duration,
    client_disconnect_timeout: Duration,
    secure: bool,
    local_addr: Option<net::SocketAddr>,
    expect: X,
@@ -28,22 +25,23 @@ pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> {
    _phantom: PhantomData<S>,
}

impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler>
impl<T, S> Default for HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler>
where
    S: ServiceFactory<Request, Config = ()>,
    S::Error: Into<Response<BoxBody>> + 'static,
    S::InitError: fmt::Debug,
    <S::Service as Service<Request>>::Future: 'static,
{
    /// Create instance of `ServiceConfigBuilder`
    #[allow(clippy::new_without_default)]
    pub fn new() -> Self {
    fn default() -> Self {
        HttpServiceBuilder {
            keep_alive: KeepAlive::Timeout(5),
            client_timeout: 5000,
            client_disconnect: 0,
            // ServiceConfig parts (make sure defaults match)
            keep_alive: KeepAlive::default(),
            client_request_timeout: Duration::from_secs(5),
            client_disconnect_timeout: Duration::ZERO,
            secure: false,
            local_addr: None,

            // dispatcher parts
            expect: ExpectHandler,
            upgrade: None,
            on_connect_ext: None,

@@ -65,9 +63,11 @@ where
    U::Error: fmt::Display,
    U::InitError: fmt::Debug,
{
    /// Set server keep-alive setting.
    /// Set connection keep-alive setting.
    ///
    /// By default keep alive is set to a 5 seconds.
    /// Applies to HTTP/1.1 keep-alive and HTTP/2 ping-pong.
    ///
    /// By default keep-alive is 5 seconds.
    pub fn keep_alive<W: Into<KeepAlive>>(mut self, val: W) -> Self {
        self.keep_alive = val.into();
        self
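Reviewer note: `KeepAlive` (now re-exported at the crate root per the import change above) is `Duration`-based in this changeset. A small sketch of the variants referenced here; exact derives and default values are assumptions.

```rust
use std::time::Duration;

use actix_http::KeepAlive;

fn main() {
    // `Timeout` now carries a `Duration` instead of whole seconds as a `usize`.
    let per_conn = KeepAlive::Timeout(Duration::from_secs(30));
    assert!(matches!(per_conn, KeepAlive::Timeout(dur) if dur == Duration::from_secs(30)));

    // The other settings appear in `keep_alive_deadline` later in this diff.
    let _os_managed = KeepAlive::Os;
    let _disabled = KeepAlive::Disabled;
}
```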
@@ -85,33 +85,45 @@ where
        self
    }

    /// Set server client timeout in milliseconds for first request.
    /// Set client request timeout (for first request).
    ///
    /// Defines a timeout for reading client request header. If a client does not transmit
    /// the entire set headers within this time, the request is terminated with
    /// the 408 (Request Time-out) error.
    /// Defines a timeout for reading client request header. If the client does not transmit the
    /// request head within this duration, the connection is terminated with a `408 Request Timeout`
    /// response error.
    ///
    /// To disable timeout set value to 0.
    /// A duration of zero disables the timeout.
    ///
    /// By default client timeout is set to 5000 milliseconds.
    pub fn client_timeout(mut self, val: u64) -> Self {
        self.client_timeout = val;
    /// By default, the client timeout is 5 seconds.
    pub fn client_request_timeout(mut self, dur: Duration) -> Self {
        self.client_request_timeout = dur;
        self
    }

    /// Set server connection disconnect timeout in milliseconds.
    #[doc(hidden)]
    #[deprecated(since = "3.0.0", note = "Renamed to `client_request_timeout`.")]
    pub fn client_timeout(self, dur: Duration) -> Self {
        self.client_request_timeout(dur)
    }

    /// Set client connection disconnect timeout.
    ///
    /// Defines a timeout for disconnect connection. If a disconnect procedure does not complete
    /// within this time, the request get dropped. This timeout affects secure connections.
    ///
    /// To disable timeout set value to 0.
    /// A duration of zero disables the timeout.
    ///
    /// By default disconnect timeout is set to 0.
    pub fn client_disconnect(mut self, val: u64) -> Self {
        self.client_disconnect = val;
    /// By default, the disconnect timeout is disabled.
    pub fn client_disconnect_timeout(mut self, dur: Duration) -> Self {
        self.client_disconnect_timeout = dur;
        self
    }

    #[doc(hidden)]
    #[deprecated(since = "3.0.0", note = "Renamed to `client_disconnect_timeout`.")]
    pub fn client_disconnect(self, dur: Duration) -> Self {
        self.client_disconnect_timeout(dur)
    }

    /// Provide service for `EXPECT: 100-Continue` support.
    ///
    /// Service get called with request that contains `EXPECT` header.
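Reviewer note: a hedged sketch of the renamed, `Duration`-based builder methods in one chain. The handler shape and `.tcp()` follow the examples earlier in this diff; the values are illustrative, not recommended defaults.

```rust
use std::{convert::Infallible, time::Duration};

use actix_http::{HttpService, Request, Response, StatusCode};

fn main() {
    // Construction only; binding and running the service is shown in the examples above.
    let _factory = HttpService::build()
        .client_request_timeout(Duration::from_secs(5))
        .client_disconnect_timeout(Duration::ZERO) // zero disables the timeout
        .finish(|_req: Request| async move {
            Ok::<_, Infallible>(Response::build(StatusCode::OK).body("ok"))
        })
        .tcp();
}
```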
@@ -126,8 +138,8 @@ where
    {
        HttpServiceBuilder {
            keep_alive: self.keep_alive,
            client_timeout: self.client_timeout,
            client_disconnect: self.client_disconnect,
            client_request_timeout: self.client_request_timeout,
            client_disconnect_timeout: self.client_disconnect_timeout,
            secure: self.secure,
            local_addr: self.local_addr,
            expect: expect.into_factory(),

@@ -150,8 +162,8 @@ where
    {
        HttpServiceBuilder {
            keep_alive: self.keep_alive,
            client_timeout: self.client_timeout,
            client_disconnect: self.client_disconnect,
            client_request_timeout: self.client_request_timeout,
            client_disconnect_timeout: self.client_disconnect_timeout,
            secure: self.secure,
            local_addr: self.local_addr,
            expect: self.expect,

@@ -174,7 +186,7 @@ where
        self
    }

    /// Finish service configuration and create a HTTP Service for HTTP/1 protocol.
    /// Finish service configuration and create a service for the HTTP/1 protocol.
    pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U>
    where
        B: MessageBody,

@@ -185,8 +197,8 @@ where
    {
        let cfg = ServiceConfig::new(
            self.keep_alive,
            self.client_timeout,
            self.client_disconnect,
            self.client_request_timeout,
            self.client_disconnect_timeout,
            self.secure,
            self.local_addr,
        );

@@ -197,8 +209,10 @@ where
            .on_connect_ext(self.on_connect_ext)
    }

    /// Finish service configuration and create a HTTP service for HTTP/2 protocol.
    pub fn h2<F, B>(self, service: F) -> H2Service<T, S, B>
    /// Finish service configuration and create a service for the HTTP/2 protocol.
    #[cfg(feature = "http2")]
    #[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
    pub fn h2<F, B>(self, service: F) -> crate::h2::H2Service<T, S, B>
    where
        F: IntoServiceFactory<S, Request>,
        S::Error: Into<Response<BoxBody>> + 'static,

@@ -209,13 +223,14 @@ where
    {
        let cfg = ServiceConfig::new(
            self.keep_alive,
            self.client_timeout,
            self.client_disconnect,
            self.client_request_timeout,
            self.client_disconnect_timeout,
            self.secure,
            self.local_addr,
        );

        H2Service::with_config(cfg, service.into_factory()).on_connect_ext(self.on_connect_ext)
        crate::h2::H2Service::with_config(cfg, service.into_factory())
            .on_connect_ext(self.on_connect_ext)
    }

    /// Finish service configuration and create `HttpService` instance.

@@ -230,8 +245,8 @@ where
    {
        let cfg = ServiceConfig::new(
            self.keep_alive,
            self.client_timeout,
            self.client_disconnect,
            self.client_request_timeout,
            self.client_disconnect_timeout,
            self.secure,
            self.local_addr,
        );
@ -1,106 +1,59 @@
|
||||
use std::{
|
||||
cell::Cell,
|
||||
fmt::{self, Write},
|
||||
net,
|
||||
rc::Rc,
|
||||
time::{Duration, SystemTime},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
|
||||
use actix_rt::{
|
||||
task::JoinHandle,
|
||||
time::{interval, sleep_until, Instant, Sleep},
|
||||
};
|
||||
use bytes::BytesMut;
|
||||
|
||||
/// "Sun, 06 Nov 1994 08:49:37 GMT".len()
|
||||
pub(crate) const DATE_VALUE_LENGTH: usize = 29;
|
||||
use crate::{date::DateService, KeepAlive};
|
||||
|
||||
#[derive(Debug, PartialEq, Clone, Copy)]
|
||||
/// Server keep-alive setting
|
||||
pub enum KeepAlive {
|
||||
/// Keep alive in seconds
|
||||
Timeout(usize),
|
||||
|
||||
/// Rely on OS to shutdown tcp connection
|
||||
Os,
|
||||
|
||||
/// Disabled
|
||||
Disabled,
|
||||
}
|
||||
|
||||
impl From<usize> for KeepAlive {
|
||||
fn from(keepalive: usize) -> Self {
|
||||
KeepAlive::Timeout(keepalive)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Option<usize>> for KeepAlive {
|
||||
fn from(keepalive: Option<usize>) -> Self {
|
||||
if let Some(keepalive) = keepalive {
|
||||
KeepAlive::Timeout(keepalive)
|
||||
} else {
|
||||
KeepAlive::Disabled
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Http service configuration
|
||||
/// HTTP service configuration.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ServiceConfig(Rc<Inner>);
|
||||
|
||||
#[derive(Debug)]
|
||||
struct Inner {
|
||||
keep_alive: Option<Duration>,
|
||||
client_timeout: u64,
|
||||
client_disconnect: u64,
|
||||
ka_enabled: bool,
|
||||
keep_alive: KeepAlive,
|
||||
client_request_timeout: Duration,
|
||||
client_disconnect_timeout: Duration,
|
||||
secure: bool,
|
||||
local_addr: Option<std::net::SocketAddr>,
|
||||
date_service: DateService,
|
||||
}
|
||||
|
||||
impl Clone for ServiceConfig {
|
||||
fn clone(&self) -> Self {
|
||||
ServiceConfig(self.0.clone())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ServiceConfig {
|
||||
fn default() -> Self {
|
||||
Self::new(KeepAlive::Timeout(5), 0, 0, false, None)
|
||||
Self::new(
|
||||
KeepAlive::default(),
|
||||
Duration::from_secs(5),
|
||||
Duration::ZERO,
|
||||
false,
|
||||
None,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl ServiceConfig {
|
||||
/// Create instance of `ServiceConfig`
|
||||
/// Create instance of `ServiceConfig`.
|
||||
pub fn new(
|
||||
keep_alive: KeepAlive,
|
||||
client_timeout: u64,
|
||||
client_disconnect: u64,
|
||||
client_request_timeout: Duration,
|
||||
client_disconnect_timeout: Duration,
|
||||
secure: bool,
|
||||
local_addr: Option<net::SocketAddr>,
|
||||
) -> ServiceConfig {
|
||||
let (keep_alive, ka_enabled) = match keep_alive {
|
||||
KeepAlive::Timeout(val) => (val as u64, true),
|
||||
KeepAlive::Os => (0, true),
|
||||
KeepAlive::Disabled => (0, false),
|
||||
};
|
||||
let keep_alive = if ka_enabled && keep_alive > 0 {
|
||||
Some(Duration::from_secs(keep_alive))
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
ServiceConfig(Rc::new(Inner {
|
||||
keep_alive,
|
||||
ka_enabled,
|
||||
client_timeout,
|
||||
client_disconnect,
|
||||
keep_alive: keep_alive.normalize(),
|
||||
client_request_timeout,
|
||||
client_disconnect_timeout,
|
||||
secure,
|
||||
local_addr,
|
||||
date_service: DateService::new(),
|
||||
}))
|
||||
}
|
||||
|
||||
/// Returns true if connection is secure (HTTPS)
|
||||
/// Returns `true` if connection is secure (i.e., using TLS / HTTPS).
|
||||
#[inline]
|
||||
pub fn secure(&self) -> bool {
|
||||
self.0.secure
|
||||
@ -114,239 +67,97 @@ impl ServiceConfig {
|
||||
self.0.local_addr
|
||||
}
|
||||
|
||||
/// Keep alive duration if configured.
|
||||
/// Connection keep-alive setting.
|
||||
#[inline]
|
||||
pub fn keep_alive(&self) -> Option<Duration> {
|
||||
pub fn keep_alive(&self) -> KeepAlive {
|
||||
self.0.keep_alive
|
||||
}
|
||||
|
||||
/// Return state of connection keep-alive functionality
|
||||
#[inline]
|
||||
pub fn keep_alive_enabled(&self) -> bool {
|
||||
self.0.ka_enabled
|
||||
}
|
||||
|
||||
/// Client timeout for first request.
|
||||
#[inline]
|
||||
pub fn client_timer(&self) -> Option<Sleep> {
|
||||
let delay_time = self.0.client_timeout;
|
||||
if delay_time != 0 {
|
||||
Some(sleep_until(self.now() + Duration::from_millis(delay_time)))
|
||||
} else {
|
||||
None
|
||||
/// Creates a time object representing the deadline for this connection's keep-alive period, if
|
||||
/// enabled.
|
||||
///
|
||||
/// When [`KeepAlive::Os`] or [`KeepAlive::Disabled`] is set, this will return `None`.
|
||||
pub fn keep_alive_deadline(&self) -> Option<Instant> {
|
||||
match self.keep_alive() {
|
||||
KeepAlive::Timeout(dur) => Some(self.now() + dur),
|
||||
KeepAlive::Os => None,
|
||||
KeepAlive::Disabled => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Client timeout for first request.
|
||||
pub fn client_timer_expire(&self) -> Option<Instant> {
|
||||
let delay = self.0.client_timeout;
|
||||
if delay != 0 {
|
||||
Some(self.now() + Duration::from_millis(delay))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
/// Creates a time object representing the deadline for the client to finish sending the head of
|
||||
/// its first request.
|
||||
///
|
||||
/// Returns `None` if this `ServiceConfig was` constructed with `client_request_timeout: 0`.
|
||||
pub fn client_request_deadline(&self) -> Option<Instant> {
|
||||
let timeout = self.0.client_request_timeout;
|
||||
(timeout != Duration::ZERO).then(|| self.now() + timeout)
|
||||
}
|
||||
|
||||
/// Client disconnect timer
|
||||
pub fn client_disconnect_timer(&self) -> Option<Instant> {
|
||||
let delay = self.0.client_disconnect;
|
||||
if delay != 0 {
|
||||
Some(self.now() + Duration::from_millis(delay))
|
||||
} else {
|
||||
None
|
||||
}
|
||||
/// Creates a time object representing the deadline for the client to disconnect.
|
||||
pub fn client_disconnect_deadline(&self) -> Option<Instant> {
|
||||
let timeout = self.0.client_disconnect_timeout;
|
||||
(timeout != Duration::ZERO).then(|| self.now() + timeout)
|
||||
}
|
||||
|
||||
/// Return keep-alive timer delay is configured.
|
||||
#[inline]
|
||||
pub fn keep_alive_timer(&self) -> Option<Sleep> {
|
||||
self.keep_alive().map(|ka| sleep_until(self.now() + ka))
|
||||
}
|
||||
|
||||
/// Keep-alive expire time
|
||||
pub fn keep_alive_expire(&self) -> Option<Instant> {
|
||||
self.keep_alive().map(|ka| self.now() + ka)
|
||||
}
|
||||
|
||||
#[inline]
|
||||
pub(crate) fn now(&self) -> Instant {
|
||||
self.0.date_service.now()
|
||||
}
|
||||
|
||||
/// Writes date header to `dst` buffer.
|
||||
///
|
||||
/// Low-level method that utilizes the built-in efficient date service, requiring fewer syscalls
|
||||
/// than normal. Note that a CRLF (`\r\n`) is included in what is written.
|
||||
#[doc(hidden)]
|
||||
pub fn set_date(&self, dst: &mut BytesMut, camel_case: bool) {
|
||||
let mut buf: [u8; 39] = [0; 39];
|
||||
pub fn write_date_header(&self, dst: &mut BytesMut, camel_case: bool) {
|
||||
let mut buf: [u8; 37] = [0; 37];
|
||||
|
||||
buf[..6].copy_from_slice(if camel_case { b"Date: " } else { b"date: " });
|
||||
|
||||
self.0
|
||||
.date_service
|
||||
.set_date(|date| buf[6..35].copy_from_slice(&date.bytes));
|
||||
.with_date(|date| buf[6..35].copy_from_slice(&date.bytes));
|
||||
|
||||
buf[35..].copy_from_slice(b"\r\n\r\n");
|
||||
buf[35..].copy_from_slice(b"\r\n");
|
||||
dst.extend_from_slice(&buf);
|
||||
}
|
||||
|
||||
pub(crate) fn set_date_header(&self, dst: &mut BytesMut) {
|
||||
#[allow(unused)] // used with `http2` feature flag
|
||||
pub(crate) fn write_date_header_value(&self, dst: &mut BytesMut) {
|
||||
self.0
|
||||
.date_service
|
||||
.set_date(|date| dst.extend_from_slice(&date.bytes));
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Copy, Clone)]
|
||||
struct Date {
|
||||
bytes: [u8; DATE_VALUE_LENGTH],
|
||||
pos: usize,
|
||||
}
|
||||
|
||||
impl Date {
|
||||
fn new() -> Date {
|
||||
let mut date = Date {
|
||||
bytes: [0; DATE_VALUE_LENGTH],
|
||||
pos: 0,
|
||||
};
|
||||
date.update();
|
||||
date
|
||||
}
|
||||
|
||||
fn update(&mut self) {
|
||||
self.pos = 0;
|
||||
write!(self, "{}", httpdate::fmt_http_date(SystemTime::now())).unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Write for Date {
|
||||
fn write_str(&mut self, s: &str) -> fmt::Result {
|
||||
let len = s.len();
|
||||
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
|
||||
self.pos += len;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Service for update Date and Instant periodically at 500 millis interval.
|
||||
struct DateService {
|
||||
current: Rc<Cell<(Date, Instant)>>,
|
||||
handle: JoinHandle<()>,
|
||||
}
|
||||
|
||||
impl Drop for DateService {
|
||||
fn drop(&mut self) {
|
||||
// stop the timer update async task on drop.
|
||||
self.handle.abort();
|
||||
}
|
||||
}
|
||||
|
||||
impl DateService {
|
||||
fn new() -> Self {
|
||||
// shared date and timer for DateService and update async task.
|
||||
let current = Rc::new(Cell::new((Date::new(), Instant::now())));
|
||||
let current_clone = Rc::clone(¤t);
|
||||
// spawn an async task sleep for 500 milli and update current date/timer in a loop.
|
||||
// handle is used to stop the task on DateService drop.
|
||||
let handle = actix_rt::spawn(async move {
|
||||
#[cfg(test)]
|
||||
let _notify = notify_on_drop::NotifyOnDrop::new();
|
||||
|
||||
let mut interval = interval(Duration::from_millis(500));
|
||||
loop {
|
||||
let now = interval.tick().await;
|
||||
let date = Date::new();
|
||||
current_clone.set((date, now));
|
||||
}
|
||||
});
|
||||
|
||||
DateService { current, handle }
|
||||
}
|
||||
|
||||
fn now(&self) -> Instant {
|
||||
self.current.get().1
|
||||
}
|
||||
|
||||
fn set_date<F: FnMut(&Date)>(&self, mut f: F) {
|
||||
f(&self.current.get().0);
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: move to a util module for testing all spawn handle drop style tasks.
|
||||
/// Test Module for checking the drop state of certain async tasks that are spawned
|
||||
/// with `actix_rt::spawn`
|
||||
///
|
||||
/// The target task must explicitly generate `NotifyOnDrop` when spawn the task
|
||||
#[cfg(test)]
|
||||
mod notify_on_drop {
|
||||
use std::cell::RefCell;
|
||||
|
||||
thread_local! {
|
||||
static NOTIFY_DROPPED: RefCell<Option<bool>> = RefCell::new(None);
|
||||
}
|
||||
|
||||
/// Check if the spawned task is dropped.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics when there was no `NotifyOnDrop` instance on current thread.
|
||||
pub(crate) fn is_dropped() -> bool {
|
||||
NOTIFY_DROPPED.with(|bool| {
|
||||
bool.borrow()
|
||||
.expect("No NotifyOnDrop existed on current thread")
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) struct NotifyOnDrop;
|
||||
|
||||
impl NotifyOnDrop {
|
||||
/// # Panic:
|
||||
///
|
||||
/// When construct multiple instances on any given thread.
|
||||
pub(crate) fn new() -> Self {
|
||||
NOTIFY_DROPPED.with(|bool| {
|
||||
let mut bool = bool.borrow_mut();
|
||||
if bool.is_some() {
|
||||
panic!("NotifyOnDrop existed on current thread");
|
||||
} else {
|
||||
*bool = Some(false);
|
||||
}
|
||||
});
|
||||
|
||||
NotifyOnDrop
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for NotifyOnDrop {
|
||||
fn drop(&mut self) {
|
||||
NOTIFY_DROPPED.with(|bool| {
|
||||
if let Some(b) = bool.borrow_mut().as_mut() {
|
||||
*b = true;
|
||||
}
|
||||
});
|
||||
}
|
||||
.with_date(|date| dst.extend_from_slice(&date.bytes));
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::{date::DATE_VALUE_LENGTH, notify_on_drop};
|
||||
|
||||
use actix_rt::{task::yield_now, time::sleep};
|
||||
use actix_rt::{
|
||||
task::yield_now,
|
||||
time::{sleep, sleep_until},
|
||||
};
|
||||
use memchr::memmem;
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_date_service_update() {
|
||||
let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None);
|
||||
let settings =
|
||||
ServiceConfig::new(KeepAlive::Os, Duration::ZERO, Duration::ZERO, false, None);
|
||||
|
||||
yield_now().await;
|
||||
|
||||
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
|
||||
settings.set_date(&mut buf1, false);
|
||||
settings.write_date_header(&mut buf1, false);
|
||||
let now1 = settings.now();
|
||||
|
||||
sleep_until(Instant::now() + Duration::from_secs(2)).await;
|
||||
sleep_until((Instant::now() + Duration::from_secs(2)).into()).await;
|
||||
yield_now().await;
|
||||
|
||||
let now2 = settings.now();
|
||||
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
|
||||
settings.set_date(&mut buf2, false);
|
||||
settings.write_date_header(&mut buf2, false);
|
||||
|
||||
assert_ne!(now1, now2);
|
||||
|
||||
@ -402,10 +213,10 @@ mod tests {
|
||||
let settings = ServiceConfig::default();
|
||||
|
||||
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
|
||||
settings.set_date(&mut buf1, false);
|
||||
settings.write_date_header(&mut buf1, false);
|
||||
|
||||
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
|
||||
settings.set_date(&mut buf2, false);
|
||||
settings.write_date_header(&mut buf2, false);
|
||||
|
||||
assert_eq!(buf1, buf2);
|
||||
}
|
||||
@ -415,11 +226,11 @@ mod tests {
|
||||
let settings = ServiceConfig::default();
|
||||
|
||||
let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
|
||||
settings.set_date(&mut buf, false);
|
||||
settings.write_date_header(&mut buf, false);
|
||||
assert!(memmem::find(&buf, b"date:").is_some());
|
||||
|
||||
let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
|
||||
settings.set_date(&mut buf, true);
|
||||
settings.write_date_header(&mut buf, true);
|
||||
assert!(memmem::find(&buf, b"Date:").is_some());
|
||||
}
|
||||
}
|
||||
|
actix-http/src/date.rs (new file, 92 lines)
@@ -0,0 +1,92 @@
use std::{
    cell::Cell,
    fmt::{self, Write},
    rc::Rc,
    time::{Duration, Instant, SystemTime},
};

use actix_rt::{task::JoinHandle, time::interval};

/// "Thu, 01 Jan 1970 00:00:00 GMT".len()
pub(crate) const DATE_VALUE_LENGTH: usize = 29;

#[derive(Clone, Copy)]
pub(crate) struct Date {
    pub(crate) bytes: [u8; DATE_VALUE_LENGTH],
    pos: usize,
}

impl Date {
    fn new() -> Date {
        let mut date = Date {
            bytes: [0; DATE_VALUE_LENGTH],
            pos: 0,
        };
        date.update();
        date
    }

    fn update(&mut self) {
        self.pos = 0;
        write!(self, "{}", httpdate::fmt_http_date(SystemTime::now())).unwrap();
    }
}

impl fmt::Write for Date {
    fn write_str(&mut self, s: &str) -> fmt::Result {
        let len = s.len();
        self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
        self.pos += len;
        Ok(())
    }
}

/// Service for update Date and Instant periodically at 500 millis interval.
pub(crate) struct DateService {
    current: Rc<Cell<(Date, Instant)>>,
    handle: JoinHandle<()>,
}

impl DateService {
    pub(crate) fn new() -> Self {
        // shared date and timer for DateService and update async task.
        let current = Rc::new(Cell::new((Date::new(), Instant::now())));
        let current_clone = Rc::clone(&current);
        // spawn an async task sleep for 500 millis and update current date/timer in a loop.
        // handle is used to stop the task on DateService drop.
        let handle = actix_rt::spawn(async move {
            #[cfg(test)]
            let _notify = crate::notify_on_drop::NotifyOnDrop::new();

            let mut interval = interval(Duration::from_millis(500));
            loop {
                let now = interval.tick().await;
                let date = Date::new();
                current_clone.set((date, now.into_std()));
            }
        });

        DateService { current, handle }
    }

    pub(crate) fn now(&self) -> Instant {
        self.current.get().1
    }

    pub(crate) fn with_date<F: FnMut(&Date)>(&self, mut f: F) {
        f(&self.current.get().0);
    }
}

impl fmt::Debug for DateService {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        f.debug_struct("DateService").finish_non_exhaustive()
    }
}

impl Drop for DateService {
    fn drop(&mut self) {
        // stop the timer update async task on drop.
        self.handle.abort();
    }
}
@ -19,7 +19,7 @@ use zstd::stream::write::Decoder as ZstdDecoder;
|
||||
|
||||
use crate::{
|
||||
encoding::Writer,
|
||||
error::{BlockingError, PayloadError},
|
||||
error::PayloadError,
|
||||
header::{ContentEncoding, HeaderMap, CONTENT_ENCODING},
|
||||
};
|
||||
|
||||
@ -47,14 +47,17 @@ where
|
||||
ContentEncoding::Brotli => Some(ContentDecoder::Brotli(Box::new(
|
||||
brotli::DecompressorWriter::new(Writer::new(), 8_096),
|
||||
))),
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
|
||||
ZlibDecoder::new(Writer::new()),
|
||||
))),
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(GzDecoder::new(
|
||||
Writer::new(),
|
||||
)))),
|
||||
|
||||
#[cfg(feature = "compress-zstd")]
|
||||
ContentEncoding::Zstd => Some(ContentDecoder::Zstd(Box::new(
|
||||
ZstdDecoder::new(Writer::new()).expect(
|
||||
@ -98,8 +101,12 @@ where
|
||||
|
||||
loop {
|
||||
if let Some(ref mut fut) = this.fut {
|
||||
let (chunk, decoder) =
|
||||
ready!(Pin::new(fut).poll(cx)).map_err(|_| BlockingError)??;
|
||||
let (chunk, decoder) = ready!(Pin::new(fut).poll(cx)).map_err(|_| {
|
||||
PayloadError::Io(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"Blocking task was cancelled unexpectedly",
|
||||
))
|
||||
})??;
|
||||
|
||||
*this.decoder = Some(decoder);
|
||||
this.fut.take();
|
||||
@ -159,10 +166,13 @@ where
|
||||
enum ContentDecoder {
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
Deflate(Box<ZlibDecoder<Writer>>),
|
||||
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
Gzip(Box<GzDecoder<Writer>>),
|
||||
|
||||
#[cfg(feature = "compress-brotli")]
|
||||
Brotli(Box<brotli::DecompressorWriter<Writer>>),
|
||||
|
||||
// We need explicit 'static lifetime here because ZstdDecoder need lifetime
|
||||
// argument, and we use `spawn_blocking` in `Decoder::poll_next` that require `FnOnce() -> R + Send + 'static`
|
||||
#[cfg(feature = "compress-zstd")]
|
||||
|
@ -17,13 +17,13 @@ use pin_project_lite::pin_project;
|
||||
#[cfg(feature = "compress-gzip")]
|
||||
use flate2::write::{GzEncoder, ZlibEncoder};
|
||||
|
||||
use tracing::trace;
|
||||
#[cfg(feature = "compress-zstd")]
|
||||
use zstd::stream::write::Encoder as ZstdEncoder;
|
||||
|
||||
use super::Writer;
|
||||
use crate::{
|
||||
body::{self, BodySize, MessageBody},
|
||||
error::BlockingError,
|
||||
header::{self, ContentEncoding, HeaderValue, CONTENT_ENCODING},
|
||||
ResponseHead, StatusCode,
|
||||
};
|
||||
@ -173,7 +173,12 @@ where
|
||||
|
||||
if let Some(ref mut fut) = this.fut {
|
||||
let mut encoder = ready!(Pin::new(fut).poll(cx))
|
||||
.map_err(|_| EncoderError::Blocking(BlockingError))?
|
||||
.map_err(|_| {
|
||||
EncoderError::Io(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"Blocking task was cancelled unexpectedly",
|
||||
))
|
||||
})?
|
||||
.map_err(EncoderError::Io)?;
|
||||
|
||||
let chunk = encoder.take();
|
||||
@ -252,7 +257,7 @@ fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
|
||||
head.headers_mut()
|
||||
.insert(header::CONTENT_ENCODING, encoding.to_header_value());
|
||||
head.headers_mut()
|
||||
.insert(header::VARY, HeaderValue::from_static("accept-encoding"));
|
||||
.append(header::VARY, HeaderValue::from_static("accept-encoding"));
|
||||
|
||||
head.no_chunking(false);
|
||||
}
|
||||
@ -391,21 +396,20 @@ impl ContentEncoder {
|
||||
fn new_brotli_compressor() -> Box<brotli::CompressorWriter<Writer>> {
|
||||
Box::new(brotli::CompressorWriter::new(
|
||||
Writer::new(),
|
||||
8 * 1024, // 32 KiB buffer
|
||||
3, // BROTLI_PARAM_QUALITY
|
||||
22, // BROTLI_PARAM_LGWIN
|
||||
32 * 1024, // 32 KiB buffer
|
||||
3, // BROTLI_PARAM_QUALITY
|
||||
22, // BROTLI_PARAM_LGWIN
|
||||
))
|
||||
}
|
||||
|
||||
#[derive(Debug, Display)]
|
||||
#[non_exhaustive]
|
||||
pub enum EncoderError {
|
||||
/// Wrapped body stream error.
|
||||
#[display(fmt = "body")]
|
||||
Body(Box<dyn StdError>),
|
||||
|
||||
#[display(fmt = "blocking")]
|
||||
Blocking(BlockingError),
|
||||
|
||||
/// Generic I/O error.
|
||||
#[display(fmt = "io")]
|
||||
Io(io::Error),
|
||||
}
|
||||
@ -414,7 +418,6 @@ impl StdError for EncoderError {
|
||||
fn source(&self) -> Option<&(dyn StdError + 'static)> {
|
||||
match self {
|
||||
EncoderError::Body(err) => Some(&**err),
|
||||
EncoderError::Blocking(err) => Some(err),
|
||||
EncoderError::Io(err) => Some(err),
|
||||
}
|
||||
}
|
||||
|
@ -5,7 +5,7 @@ use std::{error::Error as StdError, fmt, io, str::Utf8Error, string::FromUtf8Err
|
||||
use derive_more::{Display, Error, From};
|
||||
use http::{uri::InvalidUri, StatusCode};
|
||||
|
||||
use crate::{body::BoxBody, ws, Response};
|
||||
use crate::{body::BoxBody, Response};
|
||||
|
||||
pub use http::Error as HttpError;
|
||||
|
||||
@ -51,7 +51,7 @@ impl Error {
|
||||
Self::new(Kind::SendResponse)
|
||||
}
|
||||
|
||||
#[allow(unused)] // reserved for future use (TODO: remove allow when being used)
|
||||
#[allow(unused)] // available for future use
|
||||
pub(crate) fn new_io() -> Self {
|
||||
Self::new(Kind::Io)
|
||||
}
|
||||
@ -61,6 +61,7 @@ impl Error {
|
||||
Self::new(Kind::Encoder)
|
||||
}
|
||||
|
||||
#[allow(unused)] // used with `ws` feature flag
|
||||
pub(crate) fn new_ws() -> Self {
|
||||
Self::new(Kind::Ws)
|
||||
}
|
||||
@ -107,8 +108,10 @@ pub(crate) enum Kind {
|
||||
|
||||
impl fmt::Debug for Error {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
// TODO: more detail
|
||||
f.write_str("actix_http::Error")
|
||||
f.debug_struct("actix_http::Error")
|
||||
.field("kind", &self.inner.kind)
|
||||
.field("cause", &self.inner.cause)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
@ -139,14 +142,16 @@ impl From<HttpError> for Error {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ws::HandshakeError> for Error {
|
||||
fn from(err: ws::HandshakeError) -> Self {
|
||||
#[cfg(feature = "ws")]
|
||||
impl From<crate::ws::HandshakeError> for Error {
|
||||
fn from(err: crate::ws::HandshakeError) -> Self {
|
||||
Self::new_ws().with_cause(err)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ws::ProtocolError> for Error {
|
||||
fn from(err: ws::ProtocolError) -> Self {
|
||||
#[cfg(feature = "ws")]
|
||||
impl From<crate::ws::ProtocolError> for Error {
|
||||
fn from(err: crate::ws::ProtocolError) -> Self {
|
||||
Self::new_ws().with_cause(err)
|
||||
}
|
||||
}
|
||||
@ -247,12 +252,6 @@ impl From<ParseError> for Response<BoxBody> {
|
||||
}
|
||||
}
|
||||
|
||||
/// A set of errors that can occur running blocking tasks in thread pool.
|
||||
#[derive(Debug, Display, Error)]
|
||||
#[display(fmt = "Blocking thread pool is gone")]
|
||||
// TODO: non-exhaustive
|
||||
pub struct BlockingError;
|
||||
|
||||
/// A set of errors that can occur during payload parsing.
|
||||
#[derive(Debug, Display)]
|
||||
#[non_exhaustive]
|
||||
@ -277,8 +276,9 @@ pub enum PayloadError {
|
||||
UnknownLength,
|
||||
|
||||
/// HTTP/2 payload error.
|
||||
#[cfg(feature = "http2")]
|
||||
#[display(fmt = "{}", _0)]
|
||||
Http2Payload(h2::Error),
|
||||
Http2Payload(::h2::Error),
|
||||
|
||||
/// Generic I/O error.
|
||||
#[display(fmt = "{}", _0)]
|
||||
@ -289,18 +289,21 @@ impl std::error::Error for PayloadError {
|
||||
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
|
||||
match self {
|
||||
PayloadError::Incomplete(None) => None,
|
||||
PayloadError::Incomplete(Some(err)) => Some(err as &dyn std::error::Error),
|
||||
PayloadError::Incomplete(Some(err)) => Some(err),
|
||||
PayloadError::EncodingCorrupted => None,
|
||||
PayloadError::Overflow => None,
|
||||
PayloadError::UnknownLength => None,
|
||||
PayloadError::Http2Payload(err) => Some(err as &dyn std::error::Error),
|
||||
PayloadError::Io(err) => Some(err as &dyn std::error::Error),
|
||||
#[cfg(feature = "http2")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
|
||||
PayloadError::Http2Payload(err) => Some(err),
|
||||
PayloadError::Io(err) => Some(err),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<h2::Error> for PayloadError {
|
||||
fn from(err: h2::Error) -> Self {
|
||||
#[cfg(feature = "http2")]
|
||||
impl From<::h2::Error> for PayloadError {
|
||||
fn from(err: ::h2::Error) -> Self {
|
||||
PayloadError::Http2Payload(err)
|
||||
}
|
||||
}
|
||||
@ -317,15 +320,6 @@ impl From<io::Error> for PayloadError {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<BlockingError> for PayloadError {
|
||||
fn from(_: BlockingError) -> Self {
|
||||
PayloadError::Io(io::Error::new(
|
||||
io::ErrorKind::Other,
|
||||
"Operation is canceled",
|
||||
))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<PayloadError> for Error {
|
||||
fn from(err: PayloadError) -> Self {
|
||||
Self::new_payload().with_cause(err)
|
||||
@ -334,6 +328,7 @@ impl From<PayloadError> for Error {
|
||||
|
||||
/// A set of errors that can occur during dispatching HTTP requests.
|
||||
#[derive(Debug, Display, From)]
|
||||
#[non_exhaustive]
|
||||
pub enum DispatchError {
|
||||
/// Service error.
|
||||
#[display(fmt = "Service Error")]
|
||||
@ -356,6 +351,8 @@ pub enum DispatchError {
|
||||
|
||||
/// HTTP/2 error.
|
||||
#[display(fmt = "{}", _0)]
|
||||
#[cfg(feature = "http2")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
|
||||
H2(h2::Error),
|
||||
|
||||
/// The first request did not complete within the specified timeout.
|
||||
@ -366,6 +363,10 @@ pub enum DispatchError {
|
||||
#[display(fmt = "Connection shutdown timeout")]
|
||||
DisconnectTimeout,
|
||||
|
||||
/// Handler dropped payload before reading EOF.
|
||||
#[display(fmt = "Handler dropped payload before reading EOF")]
|
||||
HandlerDroppedPayload,
|
||||
|
||||
/// Internal error.
|
||||
#[display(fmt = "Internal error")]
|
||||
InternalError,
|
||||
@ -374,12 +375,14 @@ pub enum DispatchError {
|
||||
impl StdError for DispatchError {
|
||||
fn source(&self) -> Option<&(dyn StdError + 'static)> {
|
||||
match self {
|
||||
// TODO: error source extraction?
|
||||
DispatchError::Service(_res) => None,
|
||||
DispatchError::Body(err) => Some(&**err),
|
||||
DispatchError::Io(err) => Some(err),
|
||||
DispatchError::Parse(err) => Some(err),
|
||||
|
||||
#[cfg(feature = "http2")]
|
||||
DispatchError::H2(err) => Some(err),
|
||||
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
@ -387,7 +390,7 @@ impl StdError for DispatchError {
|
||||
|
||||
/// A set of error that can occur during parsing content type.
|
||||
#[derive(Debug, Display, Error)]
|
||||
#[cfg_attr(test, derive(PartialEq))]
|
||||
#[cfg_attr(test, derive(PartialEq, Eq))]
|
||||
#[non_exhaustive]
|
||||
pub enum ContentTypeError {
|
||||
/// Can not parse content type
|
||||
|
@@ -1,9 +1,30 @@
use std::{
    any::{Any, TypeId},
    collections::HashMap,
    fmt,
    hash::{BuildHasherDefault, Hasher},
};

use ahash::AHashMap;
/// A hasher for `TypeId`s that takes advantage of its known characteristics.
///
/// Author of `anymap` crate has done research on the topic:
/// https://github.com/chris-morgan/anymap/blob/2e9a5704/src/lib.rs#L599
#[derive(Debug, Default)]
struct NoOpHasher(u64);

impl Hasher for NoOpHasher {
    fn write(&mut self, _bytes: &[u8]) {
        unimplemented!("This NoOpHasher can only handle u64s")
    }

    fn write_u64(&mut self, i: u64) {
        self.0 = i;
    }

    fn finish(&self) -> u64 {
        self.0
    }
}

/// A type map for request extensions.
///

@@ -11,7 +32,7 @@ use ahash::AHashMap;
#[derive(Default)]
pub struct Extensions {
    /// Use AHasher with a std HashMap with for faster lookups on the small `TypeId` keys.
    map: AHashMap<TypeId, Box<dyn Any>>,
    map: HashMap<TypeId, Box<dyn Any>, BuildHasherDefault<NoOpHasher>>,
}

impl Extensions {

@@ -19,7 +40,7 @@ impl Extensions {
    #[inline]
    pub fn new() -> Extensions {
        Extensions {
            map: AHashMap::new(),
            map: HashMap::default(),
        }
    }
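Reviewer note: usage of the `Extensions` type map is unchanged by the hasher swap; a short sketch mirroring the `on_connect_ext` example earlier in this diff.

```rust
use actix_http::Extensions;

fn main() {
    let mut ext = Extensions::new();
    ext.insert(42u32);
    // Lookups are keyed by type; the no-op `TypeId` hasher only changes how the key is hashed.
    assert_eq!(ext.get::<u32>(), Some(&42u32));
}
```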
@@ -1,6 +1,7 @@
use std::{io, task::Poll};

use bytes::{Buf as _, Bytes, BytesMut};
use tracing::{debug, trace};

macro_rules! byte (
    ($rdr:ident) => ({

@@ -14,7 +15,7 @@ macro_rules! byte (
    })
);

#[derive(Debug, PartialEq, Clone)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub(super) enum ChunkedState {
    Size,
    SizeLws,

@@ -70,13 +71,13 @@ impl ChunkedState {

        match size.checked_mul(radix) {
            Some(n) => {
                *size = n as u64;
                *size = n;
                *size += rem as u64;

                Poll::Ready(Ok(ChunkedState::Size))
            }
            None => {
                log::debug!("chunk size would overflow u64");
                debug!("chunk size would overflow u64");
                Poll::Ready(Err(io::Error::new(
                    io::ErrorKind::InvalidInput,
                    "Invalid chunk size line: Size is too big",

@@ -124,7 +125,7 @@ impl ChunkedState {
        rem: &mut u64,
        buf: &mut Option<Bytes>,
    ) -> Poll<Result<ChunkedState, io::Error>> {
        log::trace!("Chunked read, remaining={:?}", rem);
        trace!("Chunked read, remaining={:?}", rem);

        let len = rdr.len() as u64;
        if len == 0 {
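Reviewer note: a std-only sketch of the hex chunk-size accumulation guarded above (`size = size * 16 + digit`, rejecting values that would overflow `u64`).

```rust
fn push_hex_digit(size: u64, digit: u8) -> Option<u64> {
    let rem = (digit as char).to_digit(16)? as u64;
    // Mirrors the `checked_mul` guard in the decoder; the final add is also checked here.
    size.checked_mul(16)?.checked_add(rem)
}

fn main() {
    assert_eq!(push_hex_digit(0x1, b'f'), Some(0x1f));
    assert_eq!(push_hex_digit(u64::MAX, b'0'), None); // would overflow
}
```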
@ -1,9 +1,9 @@
|
||||
use std::io;
|
||||
use std::{fmt, io};
|
||||
|
||||
use actix_codec::{Decoder, Encoder};
|
||||
use bitflags::bitflags;
|
||||
use bytes::{Bytes, BytesMut};
|
||||
use http::{Method, Version};
|
||||
use tokio_util::codec::{Decoder, Encoder};
|
||||
|
||||
use super::{
|
||||
decoder::{self, PayloadDecoder, PayloadItem, PayloadType},
|
||||
@ -17,9 +17,9 @@ use crate::{
|
||||
|
||||
bitflags! {
|
||||
struct Flags: u8 {
|
||||
const HEAD = 0b0000_0001;
|
||||
const KEEPALIVE_ENABLED = 0b0000_1000;
|
||||
const STREAM = 0b0001_0000;
|
||||
const HEAD = 0b0000_0001;
|
||||
const KEEP_ALIVE_ENABLED = 0b0000_1000;
|
||||
const STREAM = 0b0001_0000;
|
||||
}
|
||||
}
|
||||
|
||||
@ -38,7 +38,7 @@ struct ClientCodecInner {
|
||||
decoder: decoder::MessageDecoder<ResponseHead>,
|
||||
payload: Option<PayloadDecoder>,
|
||||
version: Version,
|
||||
ctype: ConnectionType,
|
||||
conn_type: ConnectionType,
|
||||
|
||||
// encoder part
|
||||
flags: Flags,
|
||||
@ -51,23 +51,32 @@ impl Default for ClientCodec {
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Debug for ClientCodec {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
f.debug_struct("h1::ClientCodec")
|
||||
.field("flags", &self.inner.flags)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
impl ClientCodec {
|
||||
/// Create HTTP/1 codec.
|
||||
///
|
||||
/// `keepalive_enabled` how response `connection` header get generated.
|
||||
pub fn new(config: ServiceConfig) -> Self {
|
||||
let flags = if config.keep_alive_enabled() {
|
||||
Flags::KEEPALIVE_ENABLED
|
||||
let flags = if config.keep_alive().enabled() {
|
||||
Flags::KEEP_ALIVE_ENABLED
|
||||
} else {
|
||||
Flags::empty()
|
||||
};
|
||||
|
||||
ClientCodec {
|
||||
inner: ClientCodecInner {
|
||||
config,
|
||||
decoder: decoder::MessageDecoder::default(),
|
||||
payload: None,
|
||||
version: Version::HTTP_11,
|
||||
ctype: ConnectionType::Close,
|
||||
conn_type: ConnectionType::Close,
|
||||
|
||||
flags,
|
||||
encoder: encoder::MessageEncoder::default(),
|
||||
@ -77,12 +86,12 @@ impl ClientCodec {
|
||||
|
||||
/// Check if request is upgrade
|
||||
pub fn upgrade(&self) -> bool {
|
||||
self.inner.ctype == ConnectionType::Upgrade
|
||||
self.inner.conn_type == ConnectionType::Upgrade
|
||||
}
|
||||
|
||||
/// Check if last response is keep-alive
|
||||
pub fn keepalive(&self) -> bool {
|
||||
self.inner.ctype == ConnectionType::KeepAlive
|
||||
pub fn keep_alive(&self) -> bool {
|
||||
self.inner.conn_type == ConnectionType::KeepAlive
|
||||
}
|
||||
|
||||
/// Check last request's message type
|
||||
@ -104,8 +113,8 @@ impl ClientCodec {
|
||||
|
||||
impl ClientPayloadCodec {
|
||||
/// Check if last response is keep-alive
|
||||
pub fn keepalive(&self) -> bool {
|
||||
self.inner.ctype == ConnectionType::KeepAlive
|
||||
pub fn keep_alive(&self) -> bool {
|
||||
self.inner.conn_type == ConnectionType::KeepAlive
|
||||
}
|
||||
|
||||
/// Transform payload codec to a message codec
|
||||
@ -119,15 +128,18 @@ impl Decoder for ClientCodec {
|
||||
type Error = ParseError;
|
||||
|
||||
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
|
||||
debug_assert!(!self.inner.payload.is_some(), "Payload decoder is set");
|
||||
debug_assert!(
|
||||
self.inner.payload.is_none(),
|
||||
"Payload decoder should not be set"
|
||||
);
|
||||
|
||||
if let Some((req, payload)) = self.inner.decoder.decode(src)? {
|
||||
if let Some(ctype) = req.conn_type() {
|
||||
if let Some(conn_type) = req.conn_type() {
|
||||
// do not use peer's keep-alive
|
||||
self.inner.ctype = if ctype == ConnectionType::KeepAlive {
|
||||
self.inner.ctype
|
||||
self.inner.conn_type = if conn_type == ConnectionType::KeepAlive {
|
||||
self.inner.conn_type
|
||||
} else {
|
||||
ctype
|
||||
conn_type
|
||||
};
|
||||
}
|
||||
|
||||
@ -192,9 +204,9 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
|
||||
.set(Flags::HEAD, head.as_ref().method == Method::HEAD);
|
||||
|
||||
// connection status
|
||||
inner.ctype = match head.as_ref().connection_type() {
|
||||
inner.conn_type = match head.as_ref().connection_type() {
|
||||
ConnectionType::KeepAlive => {
|
||||
if inner.flags.contains(Flags::KEEPALIVE_ENABLED) {
|
||||
if inner.flags.contains(Flags::KEEP_ALIVE_ENABLED) {
|
||||
ConnectionType::KeepAlive
|
||||
} else {
|
||||
ConnectionType::Close
|
||||
@ -211,7 +223,7 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
|
||||
false,
|
||||
inner.version,
|
||||
length,
|
||||
inner.ctype,
|
||||
inner.conn_type,
|
||||
&inner.config,
|
||||
)?;
|
||||
}
|
||||
|
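The decode branch above ignores a peer-advertised `keep-alive` (the codec keeps its own connection state) while honoring `close` and `upgrade`. A reduced sketch of that rule, using a local `ConnectionType` enum purely for illustration:

    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    enum ConnectionType {
        Close,
        KeepAlive,
        Upgrade,
    }

    // Peer keep-alive does not override our own setting; anything else does.
    fn resolve(ours: ConnectionType, peer: ConnectionType) -> ConnectionType {
        if peer == ConnectionType::KeepAlive {
            ours
        } else {
            peer
        }
    }

    fn main() {
        assert_eq!(resolve(ConnectionType::Close, ConnectionType::KeepAlive), ConnectionType::Close);
        assert_eq!(resolve(ConnectionType::KeepAlive, ConnectionType::Close), ConnectionType::Close);
        assert_eq!(resolve(ConnectionType::Close, ConnectionType::Upgrade), ConnectionType::Upgrade);
    }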
@ -1,9 +1,9 @@
|
||||
use std::{fmt, io};
|
||||
|
||||
use actix_codec::{Decoder, Encoder};
|
||||
use bitflags::bitflags;
|
||||
use bytes::BytesMut;
|
||||
use http::{Method, Version};
|
||||
use tokio_util::codec::{Decoder, Encoder};
|
||||
|
||||
use super::{
|
||||
decoder::{self, PayloadDecoder, PayloadItem, PayloadType},
|
||||
@ -15,9 +15,9 @@ use crate::{
|
||||
|
||||
bitflags! {
|
||||
struct Flags: u8 {
|
||||
const HEAD = 0b0000_0001;
|
||||
const KEEPALIVE_ENABLED = 0b0000_0010;
|
||||
const STREAM = 0b0000_0100;
|
||||
const HEAD = 0b0000_0001;
|
||||
const KEEP_ALIVE_ENABLED = 0b0000_0010;
|
||||
const STREAM = 0b0000_0100;
|
||||
}
|
||||
}
|
||||
|
||||
@ -42,7 +42,9 @@ impl Default for Codec {
|
||||
|
||||
impl fmt::Debug for Codec {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "h1::Codec({:?})", self.flags)
|
||||
f.debug_struct("h1::Codec")
|
||||
.field("flags", &self.flags)
|
||||
.finish_non_exhaustive()
|
||||
}
|
||||
}
|
||||
|
||||
@ -51,8 +53,8 @@ impl Codec {
|
||||
///
|
||||
/// `keepalive_enabled` controls how the response `Connection` header is generated.
|
||||
pub fn new(config: ServiceConfig) -> Self {
|
||||
let flags = if config.keep_alive_enabled() {
|
||||
Flags::KEEPALIVE_ENABLED
|
||||
let flags = if config.keep_alive().enabled() {
|
||||
Flags::KEEP_ALIVE_ENABLED
|
||||
} else {
|
||||
Flags::empty()
|
||||
};
|
||||
@ -76,14 +78,14 @@ impl Codec {
|
||||
|
||||
/// Check if last response is keep-alive.
|
||||
#[inline]
|
||||
pub fn keepalive(&self) -> bool {
|
||||
pub fn keep_alive(&self) -> bool {
|
||||
self.conn_type == ConnectionType::KeepAlive
|
||||
}
|
||||
|
||||
/// Check if keep-alive enabled on server level.
|
||||
#[inline]
|
||||
pub fn keepalive_enabled(&self) -> bool {
|
||||
self.flags.contains(Flags::KEEPALIVE_ENABLED)
|
||||
pub fn keep_alive_enabled(&self) -> bool {
|
||||
self.flags.contains(Flags::KEEP_ALIVE_ENABLED)
|
||||
}
|
||||
|
||||
/// Check last request's message type.
|
||||
@ -123,11 +125,13 @@ impl Decoder for Codec {
|
||||
self.flags.set(Flags::HEAD, head.method == Method::HEAD);
|
||||
self.version = head.version;
|
||||
self.conn_type = head.connection_type();
|
||||
|
||||
if self.conn_type == ConnectionType::KeepAlive
|
||||
&& !self.flags.contains(Flags::KEEPALIVE_ENABLED)
|
||||
&& !self.flags.contains(Flags::KEEP_ALIVE_ENABLED)
|
||||
{
|
||||
self.conn_type = ConnectionType::Close
|
||||
}
|
||||
|
||||
match payload {
|
||||
PayloadType::None => self.payload = None,
|
||||
PayloadType::Payload(pl) => self.payload = Some(pl),
|
||||
@ -179,9 +183,11 @@ impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
|
||||
&self.config,
|
||||
)?;
|
||||
}
|
||||
|
||||
Message::Chunk(Some(bytes)) => {
|
||||
self.encoder.encode_chunk(bytes.as_ref(), dst)?;
|
||||
}
|
||||
|
||||
Message::Chunk(None) => {
|
||||
self.encoder.encode_eof(dst)?;
|
||||
}
|
||||
|
@ -6,7 +6,7 @@ use http::{
|
||||
header::{self, HeaderName, HeaderValue},
|
||||
Method, StatusCode, Uri, Version,
|
||||
};
|
||||
use log::{debug, error, trace};
|
||||
use tracing::{debug, error, trace};
|
||||
|
||||
use super::chunked::ChunkedState;
|
||||
use crate::{error::ParseError, header::HeaderMap, ConnectionType, Request, ResponseHead};
|
||||
@ -46,6 +46,23 @@ pub(crate) enum PayloadLength {
|
||||
None,
|
||||
}
|
||||
|
||||
impl PayloadLength {
|
||||
/// Returns true if variant is `None`.
|
||||
fn is_none(&self) -> bool {
|
||||
matches!(self, Self::None)
|
||||
}
|
||||
|
||||
/// Returns true if the variant represents a zero-length (not none) payload.
|
||||
fn is_zero(&self) -> bool {
|
||||
matches!(
|
||||
self,
|
||||
PayloadLength::Payload(PayloadType::Payload(PayloadDecoder {
|
||||
kind: Kind::Length(0)
|
||||
}))
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
pub(crate) trait MessageType: Sized {
|
||||
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>);
|
||||
|
||||
@ -59,6 +76,7 @@ pub(crate) trait MessageType: Sized {
|
||||
&mut self,
|
||||
slice: &Bytes,
|
||||
raw_headers: &[HeaderIndex],
|
||||
version: Version,
|
||||
) -> Result<PayloadLength, ParseError> {
|
||||
let mut ka = None;
|
||||
let mut has_upgrade_websocket = false;
|
||||
@ -87,21 +105,23 @@ pub(crate) trait MessageType: Sized {
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
|
||||
header::CONTENT_LENGTH => match value.to_str() {
|
||||
Ok(s) if s.trim().starts_with('+') => {
|
||||
debug!("illegal Content-Length: {:?}", s);
|
||||
header::CONTENT_LENGTH => match value.to_str().map(str::trim) {
|
||||
Ok(val) if val.starts_with('+') => {
|
||||
debug!("illegal Content-Length: {:?}", val);
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
Ok(s) => {
|
||||
if let Ok(len) = s.parse::<u64>() {
|
||||
if len != 0 {
|
||||
content_length = Some(len);
|
||||
}
|
||||
|
||||
Ok(val) => {
|
||||
if let Ok(len) = val.parse::<u64>() {
|
||||
// accept 0 lengths here and remove them in `decode` after all
|
||||
// headers have been processed to prevent request smuggling issues
|
||||
content_length = Some(len);
|
||||
} else {
|
||||
debug!("illegal Content-Length: {:?}", s);
|
||||
debug!("illegal Content-Length: {:?}", val);
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
}
|
||||
|
||||
Err(_) => {
|
||||
debug!("illegal Content-Length: {:?}", value);
|
||||
return Err(ParseError::Header);
|
||||
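The `CONTENT_LENGTH` arm above trims the value, rejects a leading `+`, and otherwise parses it as `u64`, keeping zero lengths until all headers have been seen. A hypothetical standalone helper (not the crate's API) applying the same policy:

    fn parse_content_length(raw: &str) -> Result<u64, &'static str> {
        let val = raw.trim();

        // a leading '+' would parse as a number but is not a valid header value
        if val.starts_with('+') {
            return Err("illegal Content-Length");
        }

        // zero is accepted here; it is only dropped later, after every header has
        // been processed, to avoid request-smuggling edge cases
        val.parse::<u64>().map_err(|_| "illegal Content-Length")
    }

    fn main() {
        assert_eq!(parse_content_length(" 11 "), Ok(11));
        assert_eq!(parse_content_length("011"), Ok(11)); // leading zeros read as decimal
        assert!(parse_content_length("+3").is_err());
        assert!(parse_content_length("-1").is_err());
        assert!(parse_content_length("line").is_err());
    }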
@ -114,22 +134,23 @@ pub(crate) trait MessageType: Sized {
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
|
||||
header::TRANSFER_ENCODING => {
|
||||
header::TRANSFER_ENCODING if version == Version::HTTP_11 => {
|
||||
seen_te = true;
|
||||
|
||||
if let Ok(s) = value.to_str().map(str::trim) {
|
||||
if s.eq_ignore_ascii_case("chunked") {
|
||||
if let Ok(val) = value.to_str().map(str::trim) {
|
||||
if val.eq_ignore_ascii_case("chunked") {
|
||||
chunked = true;
|
||||
} else if s.eq_ignore_ascii_case("identity") {
|
||||
} else if val.eq_ignore_ascii_case("identity") {
|
||||
// allow silently since multiple TE headers are already checked
|
||||
} else {
|
||||
debug!("illegal Transfer-Encoding: {:?}", s);
|
||||
debug!("illegal Transfer-Encoding: {:?}", val);
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
} else {
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
}
|
||||
|
||||
// connection keep-alive state
|
||||
header::CONNECTION => {
|
||||
ka = if let Ok(conn) = value.to_str().map(str::trim) {
|
||||
@ -146,6 +167,7 @@ pub(crate) trait MessageType: Sized {
|
||||
None
|
||||
};
|
||||
}
|
||||
|
||||
header::UPGRADE => {
|
||||
if let Ok(val) = value.to_str().map(str::trim) {
|
||||
if val.eq_ignore_ascii_case("websocket") {
|
||||
@ -153,19 +175,23 @@ pub(crate) trait MessageType: Sized {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
header::EXPECT => {
|
||||
let bytes = value.as_bytes();
|
||||
if bytes.len() >= 4 && &bytes[0..4] == b"100-" {
|
||||
expect = true;
|
||||
}
|
||||
}
|
||||
|
||||
_ => {}
|
||||
}
|
||||
|
||||
headers.append(name, value);
|
||||
}
|
||||
}
|
||||
|
||||
self.set_connection_type(ka);
|
||||
|
||||
if expect {
|
||||
self.set_expect()
|
||||
}
|
||||
@ -209,15 +235,16 @@ impl MessageType for Request {
|
||||
|
||||
let (len, method, uri, ver, h_len) = {
|
||||
// SAFETY:
|
||||
// Create an uninitialized array of `MaybeUninit`. The `assume_init` is
|
||||
// safe because the type we are claiming to have initialized here is a
|
||||
// bunch of `MaybeUninit`s, which do not require initialization.
|
||||
// Create an uninitialized array of `MaybeUninit`. The `assume_init` is safe because the
|
||||
// type we are claiming to have initialized here is a bunch of `MaybeUninit`s, which
|
||||
// do not require initialization.
|
||||
let mut parsed = unsafe {
|
||||
MaybeUninit::<[MaybeUninit<httparse::Header<'_>>; MAX_HEADERS]>::uninit()
|
||||
.assume_init()
|
||||
};
|
||||
|
||||
let mut req = httparse::Request::new(&mut []);
|
||||
|
||||
match req.parse_with_uninit_headers(src, &mut parsed)? {
|
||||
httparse::Status::Complete(len) => {
|
||||
let method = Method::from_bytes(req.method.unwrap().as_bytes())
|
||||
@ -232,6 +259,7 @@ impl MessageType for Request {
|
||||
|
||||
(len, method, uri, version, req.headers.len())
|
||||
}
|
||||
|
||||
httparse::Status::Partial => {
|
||||
return if src.len() >= MAX_BUFFER_SIZE {
|
||||
trace!("MAX_BUFFER_SIZE unprocessed data reached, closing");
|
||||
@ -247,7 +275,22 @@ impl MessageType for Request {
|
||||
let mut msg = Request::new();
|
||||
|
||||
// convert headers
|
||||
let length = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len])?;
|
||||
let mut length =
|
||||
msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len], ver)?;
|
||||
|
||||
// disallow HTTP/1.0 POST requests that do not contain a Content-Length header

|
||||
// see https://datatracker.ietf.org/doc/html/rfc1945#section-7.2.2
|
||||
if ver == Version::HTTP_10 && method == Method::POST && length.is_none() {
|
||||
debug!("no Content-Length specified for HTTP/1.0 POST request");
|
||||
return Err(ParseError::Header);
|
||||
}
|
||||
|
||||
// Remove CL value if 0 now that all headers and HTTP/1.0 special cases are processed.
|
||||
// Protects against some request smuggling attacks.
|
||||
// See https://github.com/actix/actix-web/issues/2767.
|
||||
if length.is_zero() {
|
||||
length = PayloadLength::None;
|
||||
}
|
||||
|
||||
// payload decoder
|
||||
let decoder = match length {
|
||||
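The two checks above combine into a small rule: an HTTP/1.0 POST with no Content-Length is rejected outright, and a zero Content-Length is only downgraded to "no payload" after all headers (including the HTTP/1.0 special case) have been handled. A sketch with assumed names, not the crate's types:

    #[derive(Debug, PartialEq, Eq)]
    enum Length {
        None,
        Zero,
        Some(u64),
    }

    fn normalize(http10_post: bool, length: Length) -> Result<Length, &'static str> {
        if http10_post && length == Length::None {
            return Err("no Content-Length specified for HTTP/1.0 POST request");
        }

        // only now is a zero length treated the same as no payload at all
        Ok(match length {
            Length::Zero => Length::None,
            other => other,
        })
    }

    fn main() {
        assert!(normalize(true, Length::None).is_err());
        assert_eq!(normalize(true, Length::Zero), Ok(Length::None));
        assert_eq!(normalize(false, Length::Some(3)), Ok(Length::Some(3)));
    }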
@ -291,22 +334,35 @@ impl MessageType for ResponseHead {
|
||||
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
|
||||
|
||||
let (len, ver, status, h_len) = {
|
||||
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
|
||||
// SAFETY:
|
||||
// Create an uninitialized array of `MaybeUninit`. The `assume_init` is safe because the
|
||||
// type we are claiming to have initialized here is a bunch of `MaybeUninit`s, which
|
||||
// do not require initialization.
|
||||
let mut parsed = unsafe {
|
||||
MaybeUninit::<[MaybeUninit<httparse::Header<'_>>; MAX_HEADERS]>::uninit()
|
||||
.assume_init()
|
||||
};
|
||||
|
||||
let mut res = httparse::Response::new(&mut parsed);
|
||||
match res.parse(src)? {
|
||||
let mut res = httparse::Response::new(&mut []);
|
||||
|
||||
let mut config = httparse::ParserConfig::default();
|
||||
config.allow_spaces_after_header_name_in_responses(true);
|
||||
|
||||
match config.parse_response_with_uninit_headers(&mut res, src, &mut parsed)? {
|
||||
httparse::Status::Complete(len) => {
|
||||
let version = if res.version.unwrap() == 1 {
|
||||
Version::HTTP_11
|
||||
} else {
|
||||
Version::HTTP_10
|
||||
};
|
||||
|
||||
let status = StatusCode::from_u16(res.code.unwrap())
|
||||
.map_err(|_| ParseError::Status)?;
|
||||
HeaderIndex::record(src, res.headers, &mut headers);
|
||||
|
||||
(len, version, status, res.headers.len())
|
||||
}
|
||||
|
||||
httparse::Status::Partial => {
|
||||
return if src.len() >= MAX_BUFFER_SIZE {
|
||||
error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
|
||||
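For readers unfamiliar with httparse, a minimal response-parsing example using the plain `parse` entry point (the diff above uses the uninitialized-headers fast path plus a `ParserConfig` that tolerates spaces after header names; this sketch, which assumes the `httparse` crate as a dependency, only shows the basic shape):

    fn main() {
        let mut headers = [httparse::EMPTY_HEADER; 16];
        let mut res = httparse::Response::new(&mut headers);

        let buf = b"HTTP/1.1 200 OK\r\ncontent-length: 0\r\n\r\n";

        match res.parse(buf).expect("valid response head") {
            httparse::Status::Complete(len) => {
                assert_eq!(len, buf.len());
                assert_eq!(res.version, Some(1)); // minor version of HTTP/1.1
                assert_eq!(res.code, Some(200));
                assert_eq!(res.headers[0].name, "content-length");
            }
            httparse::Status::Partial => panic!("need more data"),
        }
    }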
@ -322,7 +378,15 @@ impl MessageType for ResponseHead {
|
||||
msg.version = ver;
|
||||
|
||||
// convert headers
|
||||
let length = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len])?;
|
||||
let mut length =
|
||||
msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len], ver)?;
|
||||
|
||||
// Remove CL value if 0 now that all headers and HTTP/1.0 special cases are processed.
|
||||
// Protects against some request smuggling attacks.
|
||||
// See https://github.com/actix/actix-web/issues/2767.
|
||||
if length.is_zero() {
|
||||
length = PayloadLength::None;
|
||||
}
|
||||
|
||||
// message payload
|
||||
let decoder = if let PayloadLength::Payload(pl) = length {
|
||||
@ -358,9 +422,6 @@ pub(crate) const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
|
||||
pub(crate) const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] =
|
||||
[EMPTY_HEADER_INDEX; MAX_HEADERS];
|
||||
|
||||
pub(crate) const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] =
|
||||
[httparse::EMPTY_HEADER; MAX_HEADERS];
|
||||
|
||||
impl HeaderIndex {
|
||||
pub(crate) fn record(
|
||||
bytes: &[u8],
|
||||
@ -379,7 +440,7 @@ impl HeaderIndex {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
/// Chunk type yielded while decoding a payload.
|
||||
pub enum PayloadItem {
|
||||
Chunk(Bytes),
|
||||
@ -389,7 +450,7 @@ pub enum PayloadItem {
|
||||
/// Decoder that can handle different payload types.
|
||||
///
|
||||
/// If a message body does not use `Transfer-Encoding`, it should include a `Content-Length`.
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct PayloadDecoder {
|
||||
kind: Kind,
|
||||
}
|
||||
@ -415,7 +476,7 @@ impl PayloadDecoder {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, PartialEq)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
enum Kind {
|
||||
/// A reader used when a `Content-Length` header is passed with a positive integer.
|
||||
Length(u64),
|
||||
@ -594,14 +655,100 @@ mod tests {
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_parse_post() {
|
||||
let mut buf = BytesMut::from("POST /test2 HTTP/1.0\r\n\r\n");
|
||||
fn parse_h09_reject() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test1 HTTP/0.9\r\n\
|
||||
\r\n",
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
reader.decode(&mut buf).unwrap_err();
|
||||
|
||||
let mut buf = BytesMut::from(
|
||||
"POST /test2 HTTP/0.9\r\n\
|
||||
Content-Length: 3\r\n\
|
||||
\r\n
|
||||
abc",
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
reader.decode(&mut buf).unwrap_err();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_h10_get() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test1 HTTP/1.0\r\n\
|
||||
\r\n",
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(req.version(), Version::HTTP_10);
|
||||
assert_eq!(*req.method(), Method::GET);
|
||||
assert_eq!(req.path(), "/test1");
|
||||
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test2 HTTP/1.0\r\n\
|
||||
Content-Length: 0\r\n\
|
||||
\r\n",
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(req.version(), Version::HTTP_10);
|
||||
assert_eq!(*req.method(), Method::GET);
|
||||
assert_eq!(req.path(), "/test2");
|
||||
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test3 HTTP/1.0\r\n\
|
||||
Content-Length: 3\r\n\
|
||||
\r\n
|
||||
abc",
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(req.version(), Version::HTTP_10);
|
||||
assert_eq!(*req.method(), Method::GET);
|
||||
assert_eq!(req.path(), "/test3");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_h10_post() {
|
||||
let mut buf = BytesMut::from(
|
||||
"POST /test1 HTTP/1.0\r\n\
|
||||
Content-Length: 3\r\n\
|
||||
\r\n\
|
||||
abc",
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(req.version(), Version::HTTP_10);
|
||||
assert_eq!(*req.method(), Method::POST);
|
||||
assert_eq!(req.path(), "/test1");
|
||||
|
||||
let mut buf = BytesMut::from(
|
||||
"POST /test2 HTTP/1.0\r\n\
|
||||
Content-Length: 0\r\n\
|
||||
\r\n",
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
assert_eq!(req.version(), Version::HTTP_10);
|
||||
assert_eq!(*req.method(), Method::POST);
|
||||
assert_eq!(req.path(), "/test2");
|
||||
|
||||
let mut buf = BytesMut::from(
|
||||
"POST /test3 HTTP/1.0\r\n\
|
||||
\r\n",
|
||||
);
|
||||
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let err = reader.decode(&mut buf).unwrap_err();
|
||||
assert!(err.to_string().contains("Header"))
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -697,121 +844,98 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_conn_default_1_0() {
|
||||
let mut buf = BytesMut::from("GET /test HTTP/1.0\r\n\r\n");
|
||||
let req = parse_ready!(&mut buf);
|
||||
|
||||
let req = parse_ready!(&mut BytesMut::from("GET /test HTTP/1.0\r\n\r\n"));
|
||||
assert_eq!(req.head().connection_type(), ConnectionType::Close);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_conn_default_1_1() {
|
||||
let mut buf = BytesMut::from("GET /test HTTP/1.1\r\n\r\n");
|
||||
let req = parse_ready!(&mut buf);
|
||||
|
||||
let req = parse_ready!(&mut BytesMut::from("GET /test HTTP/1.1\r\n\r\n"));
|
||||
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_conn_close() {
|
||||
let mut buf = BytesMut::from(
|
||||
let req = parse_ready!(&mut BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
connection: close\r\n\r\n",
|
||||
);
|
||||
let req = parse_ready!(&mut buf);
|
||||
|
||||
));
|
||||
assert_eq!(req.head().connection_type(), ConnectionType::Close);
|
||||
|
||||
let mut buf = BytesMut::from(
|
||||
let req = parse_ready!(&mut BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
connection: Close\r\n\r\n",
|
||||
);
|
||||
let req = parse_ready!(&mut buf);
|
||||
|
||||
));
|
||||
assert_eq!(req.head().connection_type(), ConnectionType::Close);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_conn_close_1_0() {
|
||||
let mut buf = BytesMut::from(
|
||||
let req = parse_ready!(&mut BytesMut::from(
|
||||
"GET /test HTTP/1.0\r\n\
|
||||
connection: close\r\n\r\n",
|
||||
);
|
||||
|
||||
let req = parse_ready!(&mut buf);
|
||||
|
||||
));
|
||||
assert_eq!(req.head().connection_type(), ConnectionType::Close);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_conn_keep_alive_1_0() {
|
||||
let mut buf = BytesMut::from(
|
||||
let req = parse_ready!(&mut BytesMut::from(
|
||||
"GET /test HTTP/1.0\r\n\
|
||||
connection: keep-alive\r\n\r\n",
|
||||
);
|
||||
let req = parse_ready!(&mut buf);
|
||||
|
||||
));
|
||||
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
|
||||
|
||||
let mut buf = BytesMut::from(
|
||||
let req = parse_ready!(&mut BytesMut::from(
|
||||
"GET /test HTTP/1.0\r\n\
|
||||
connection: Keep-Alive\r\n\r\n",
|
||||
);
|
||||
let req = parse_ready!(&mut buf);
|
||||
|
||||
));
|
||||
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_conn_keep_alive_1_1() {
|
||||
let mut buf = BytesMut::from(
|
||||
let req = parse_ready!(&mut BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
connection: keep-alive\r\n\r\n",
|
||||
);
|
||||
let req = parse_ready!(&mut buf);
|
||||
|
||||
));
|
||||
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_conn_other_1_0() {
|
||||
let mut buf = BytesMut::from(
|
||||
let req = parse_ready!(&mut BytesMut::from(
|
||||
"GET /test HTTP/1.0\r\n\
|
||||
connection: other\r\n\r\n",
|
||||
);
|
||||
let req = parse_ready!(&mut buf);
|
||||
|
||||
));
|
||||
assert_eq!(req.head().connection_type(), ConnectionType::Close);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_conn_other_1_1() {
|
||||
let mut buf = BytesMut::from(
|
||||
let req = parse_ready!(&mut BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
connection: other\r\n\r\n",
|
||||
);
|
||||
let req = parse_ready!(&mut buf);
|
||||
|
||||
));
|
||||
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_conn_upgrade() {
|
||||
let mut buf = BytesMut::from(
|
||||
let req = parse_ready!(&mut BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
upgrade: websockets\r\n\
|
||||
connection: upgrade\r\n\r\n",
|
||||
);
|
||||
let req = parse_ready!(&mut buf);
|
||||
));
|
||||
|
||||
assert!(req.upgrade());
|
||||
assert_eq!(req.head().connection_type(), ConnectionType::Upgrade);
|
||||
|
||||
let mut buf = BytesMut::from(
|
||||
let req = parse_ready!(&mut BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
upgrade: Websockets\r\n\
|
||||
connection: Upgrade\r\n\r\n",
|
||||
);
|
||||
let req = parse_ready!(&mut buf);
|
||||
));
|
||||
|
||||
assert!(req.upgrade());
|
||||
assert_eq!(req.head().connection_type(), ConnectionType::Upgrade);
|
||||
@ -819,59 +943,62 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_conn_upgrade_connect_method() {
|
||||
let mut buf = BytesMut::from(
|
||||
let req = parse_ready!(&mut BytesMut::from(
|
||||
"CONNECT /test HTTP/1.1\r\n\
|
||||
content-type: text/plain\r\n\r\n",
|
||||
);
|
||||
let req = parse_ready!(&mut buf);
|
||||
));
|
||||
|
||||
assert!(req.upgrade());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_headers_content_length_err_1() {
|
||||
let mut buf = BytesMut::from(
|
||||
fn test_headers_bad_content_length() {
|
||||
// string CL
|
||||
expect_parse_err!(&mut BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
content-length: line\r\n\r\n",
|
||||
);
|
||||
));
|
||||
|
||||
expect_parse_err!(&mut buf)
|
||||
// negative CL
|
||||
expect_parse_err!(&mut BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
content-length: -1\r\n\r\n",
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_headers_content_length_err_2() {
|
||||
fn octal_ish_cl_parsed_as_decimal() {
|
||||
let mut buf = BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
content-length: -1\r\n\r\n",
|
||||
"POST /test HTTP/1.1\r\n\
|
||||
content-length: 011\r\n\r\n",
|
||||
);
|
||||
|
||||
expect_parse_err!(&mut buf);
|
||||
let mut reader = MessageDecoder::<Request>::default();
|
||||
let (_req, pl) = reader.decode(&mut buf).unwrap().unwrap();
|
||||
assert!(matches!(
|
||||
pl,
|
||||
PayloadType::Payload(pl) if pl == PayloadDecoder::length(11)
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_header() {
|
||||
let mut buf = BytesMut::from(
|
||||
expect_parse_err!(&mut BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
test line\r\n\r\n",
|
||||
);
|
||||
|
||||
expect_parse_err!(&mut buf);
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_invalid_name() {
|
||||
let mut buf = BytesMut::from(
|
||||
expect_parse_err!(&mut BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
test[]: line\r\n\r\n",
|
||||
);
|
||||
|
||||
expect_parse_err!(&mut buf);
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_request_bad_status_line() {
|
||||
let mut buf = BytesMut::from("getpath \r\n\r\n");
|
||||
expect_parse_err!(&mut buf);
|
||||
expect_parse_err!(&mut BytesMut::from("getpath \r\n\r\n"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -911,11 +1038,10 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_http_request_parser_utf8() {
|
||||
let mut buf = BytesMut::from(
|
||||
let req = parse_ready!(&mut BytesMut::from(
|
||||
"GET /test HTTP/1.1\r\n\
|
||||
x-test: тест\r\n\r\n",
|
||||
);
|
||||
let req = parse_ready!(&mut buf);
|
||||
));
|
||||
|
||||
assert_eq!(
|
||||
req.headers().get("x-test").unwrap().as_bytes(),
|
||||
@ -925,24 +1051,18 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_http_request_parser_two_slashes() {
|
||||
let mut buf = BytesMut::from("GET //path HTTP/1.1\r\n\r\n");
|
||||
let req = parse_ready!(&mut buf);
|
||||
|
||||
let req = parse_ready!(&mut BytesMut::from("GET //path HTTP/1.1\r\n\r\n"));
|
||||
assert_eq!(req.path(), "//path");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_request_parser_bad_method() {
|
||||
let mut buf = BytesMut::from("!12%()+=~$ /get HTTP/1.1\r\n\r\n");
|
||||
|
||||
expect_parse_err!(&mut buf);
|
||||
expect_parse_err!(&mut BytesMut::from("!12%()+=~$ /get HTTP/1.1\r\n\r\n"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_request_parser_bad_version() {
|
||||
let mut buf = BytesMut::from("GET //get HT/11\r\n\r\n");
|
||||
|
||||
expect_parse_err!(&mut buf);
|
||||
expect_parse_err!(&mut BytesMut::from("GET //get HT/11\r\n\r\n"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@ -959,29 +1079,66 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn hrs_multiple_content_length() {
|
||||
let mut buf = BytesMut::from(
|
||||
expect_parse_err!(&mut BytesMut::from(
|
||||
"GET / HTTP/1.1\r\n\
|
||||
Host: example.com\r\n\
|
||||
Content-Length: 4\r\n\
|
||||
Content-Length: 2\r\n\
|
||||
\r\n\
|
||||
abcd",
|
||||
);
|
||||
));
|
||||
|
||||
expect_parse_err!(&mut buf);
|
||||
expect_parse_err!(&mut BytesMut::from(
|
||||
"GET / HTTP/1.1\r\n\
|
||||
Host: example.com\r\n\
|
||||
Content-Length: 0\r\n\
|
||||
Content-Length: 2\r\n\
|
||||
\r\n\
|
||||
ab",
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hrs_content_length_plus() {
|
||||
let mut buf = BytesMut::from(
|
||||
expect_parse_err!(&mut BytesMut::from(
|
||||
"GET / HTTP/1.1\r\n\
|
||||
Host: example.com\r\n\
|
||||
Content-Length: +3\r\n\
|
||||
\r\n\
|
||||
000",
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hrs_te_http10() {
|
||||
// in HTTP/1.0, Transfer-Encoding is ignored, so the request must contain a Content-Length header
|
||||
|
||||
expect_parse_err!(&mut BytesMut::from(
|
||||
"POST / HTTP/1.0\r\n\
|
||||
Host: example.com\r\n\
|
||||
Transfer-Encoding: chunked\r\n\
|
||||
\r\n\
|
||||
3\r\n\
|
||||
aaa\r\n\
|
||||
0\r\n\
|
||||
",
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn hrs_cl_and_te_http10() {
|
||||
// in HTTP/1.0 transfer encoding is simply ignored so it's fine to have both
|
||||
|
||||
let mut buf = BytesMut::from(
|
||||
"GET / HTTP/1.0\r\n\
|
||||
Host: example.com\r\n\
|
||||
Content-Length: 3\r\n\
|
||||
Transfer-Encoding: chunked\r\n\
|
||||
\r\n\
|
||||
000",
|
||||
);
|
||||
|
||||
expect_parse_err!(&mut buf);
|
||||
parse_ready!(&mut buf);
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
File diff suppressed because it is too large
975  actix-http/src/h1/dispatcher_tests.rs (new file)
@ -0,0 +1,975 @@
|
||||
use std::{future::Future, str, task::Poll, time::Duration};
|
||||
|
||||
use actix_rt::{pin, time::sleep};
|
||||
use actix_service::fn_service;
|
||||
use actix_utils::future::{ready, Ready};
|
||||
use bytes::Bytes;
|
||||
use futures_util::future::lazy;
|
||||
|
||||
use actix_codec::Framed;
|
||||
use actix_service::Service;
|
||||
use bytes::{Buf, BytesMut};
|
||||
|
||||
use super::dispatcher::{Dispatcher, DispatcherState, DispatcherStateProj, Flags};
|
||||
use crate::{
|
||||
body::MessageBody,
|
||||
config::ServiceConfig,
|
||||
h1::{Codec, ExpectHandler, UpgradeHandler},
|
||||
service::HttpFlow,
|
||||
test::{TestBuffer, TestSeqBuffer},
|
||||
Error, HttpMessage, KeepAlive, Method, OnConnectData, Request, Response, StatusCode,
|
||||
};
|
||||
|
||||
fn find_slice(haystack: &[u8], needle: &[u8], from: usize) -> Option<usize> {
|
||||
memchr::memmem::find(&haystack[from..], needle)
|
||||
}
|
||||
|
||||
fn stabilize_date_header(payload: &mut [u8]) {
|
||||
let mut from = 0;
|
||||
while let Some(pos) = find_slice(payload, b"date", from) {
|
||||
payload[(from + pos)..(from + pos + 35)]
|
||||
.copy_from_slice(b"date: Thu, 01 Jan 1970 12:34:56 UTC");
|
||||
from += 35;
|
||||
}
|
||||
}
|
||||
|
||||
fn ok_service() -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
|
||||
status_service(StatusCode::OK)
|
||||
}
|
||||
|
||||
fn status_service(
|
||||
status: StatusCode,
|
||||
) -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
|
||||
fn_service(move |_req: Request| ready(Ok::<_, Error>(Response::new(status))))
|
||||
}
|
||||
|
||||
fn echo_path_service(
|
||||
) -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
|
||||
fn_service(|req: Request| {
|
||||
let path = req.path().as_bytes();
|
||||
ready(Ok::<_, Error>(
|
||||
Response::ok().set_body(Bytes::copy_from_slice(path)),
|
||||
))
|
||||
})
|
||||
}
|
||||
|
||||
fn drop_payload_service(
|
||||
) -> impl Service<Request, Response = Response<&'static str>, Error = Error> {
|
||||
fn_service(|mut req: Request| async move {
|
||||
let _ = req.take_payload();
|
||||
Ok::<_, Error>(Response::with_body(StatusCode::OK, "payload dropped"))
|
||||
})
|
||||
}
|
||||
|
||||
fn echo_payload_service() -> impl Service<Request, Response = Response<Bytes>, Error = Error> {
|
||||
fn_service(|mut req: Request| {
|
||||
Box::pin(async move {
|
||||
use futures_util::StreamExt as _;
|
||||
|
||||
let mut pl = req.take_payload();
|
||||
let mut body = BytesMut::new();
|
||||
while let Some(chunk) = pl.next().await {
|
||||
body.extend_from_slice(chunk.unwrap().chunk())
|
||||
}
|
||||
|
||||
Ok::<_, Error>(Response::ok().set_body(body.freeze()))
|
||||
})
|
||||
})
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn late_request() {
|
||||
let mut buf = TestBuffer::empty();
|
||||
|
||||
let cfg = ServiceConfig::new(
|
||||
KeepAlive::Disabled,
|
||||
Duration::from_millis(100),
|
||||
Duration::ZERO,
|
||||
false,
|
||||
None,
|
||||
);
|
||||
let services = HttpFlow::new(ok_service(), ExpectHandler, None);
|
||||
|
||||
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
|
||||
buf.clone(),
|
||||
services,
|
||||
cfg,
|
||||
None,
|
||||
OnConnectData::default(),
|
||||
);
|
||||
pin!(h1);
|
||||
|
||||
lazy(|cx| {
|
||||
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
|
||||
|
||||
match h1.as_mut().poll(cx) {
|
||||
Poll::Ready(_) => panic!("first poll should not be ready"),
|
||||
Poll::Pending => {}
|
||||
}
|
||||
|
||||
// polls: initial
|
||||
assert_eq!(h1.poll_count, 1);
|
||||
|
||||
buf.extend_read_buf("GET /abcd HTTP/1.1\r\nConnection: close\r\n\r\n");
|
||||
|
||||
match h1.as_mut().poll(cx) {
|
||||
Poll::Pending => panic!("second poll should not be pending"),
|
||||
Poll::Ready(res) => assert!(res.is_ok()),
|
||||
}
|
||||
|
||||
// polls: initial pending => handle req => shutdown
|
||||
assert_eq!(h1.poll_count, 3);
|
||||
|
||||
let mut res = buf.take_write_buf().to_vec();
|
||||
stabilize_date_header(&mut res);
|
||||
let res = &res[..];
|
||||
|
||||
let exp = b"\
|
||||
HTTP/1.1 200 OK\r\n\
|
||||
content-length: 0\r\n\
|
||||
connection: close\r\n\
|
||||
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
|
||||
";
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
exp,
|
||||
"\nexpected response not in write buffer:\n\
|
||||
response: {:?}\n\
|
||||
expected: {:?}",
|
||||
String::from_utf8_lossy(res),
|
||||
String::from_utf8_lossy(exp)
|
||||
);
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn oneshot_connection() {
|
||||
let buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
|
||||
|
||||
let cfg = ServiceConfig::new(
|
||||
KeepAlive::Disabled,
|
||||
Duration::from_millis(100),
|
||||
Duration::ZERO,
|
||||
false,
|
||||
None,
|
||||
);
|
||||
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
|
||||
|
||||
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
|
||||
buf.clone(),
|
||||
services,
|
||||
cfg,
|
||||
None,
|
||||
OnConnectData::default(),
|
||||
);
|
||||
pin!(h1);
|
||||
|
||||
lazy(|cx| {
|
||||
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
|
||||
|
||||
match h1.as_mut().poll(cx) {
|
||||
Poll::Pending => panic!("first poll should not be pending"),
|
||||
Poll::Ready(res) => assert!(res.is_ok()),
|
||||
}
|
||||
|
||||
// polls: initial => shutdown
|
||||
assert_eq!(h1.poll_count, 2);
|
||||
|
||||
let mut res = buf.take_write_buf().to_vec();
|
||||
stabilize_date_header(&mut res);
|
||||
let res = &res[..];
|
||||
|
||||
let exp = http_msg(
|
||||
r"
|
||||
HTTP/1.1 200 OK
|
||||
content-length: 5
|
||||
connection: close
|
||||
date: Thu, 01 Jan 1970 12:34:56 UTC
|
||||
|
||||
/abcd
|
||||
",
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
exp,
|
||||
"\nexpected response not in write buffer:\n\
|
||||
response: {:?}\n\
|
||||
expected: {:?}",
|
||||
String::from_utf8_lossy(res),
|
||||
String::from_utf8_lossy(&exp)
|
||||
);
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn keep_alive_timeout() {
|
||||
let buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
|
||||
|
||||
let cfg = ServiceConfig::new(
|
||||
KeepAlive::Timeout(Duration::from_millis(200)),
|
||||
Duration::from_millis(100),
|
||||
Duration::ZERO,
|
||||
false,
|
||||
None,
|
||||
);
|
||||
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
|
||||
|
||||
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
|
||||
buf.clone(),
|
||||
services,
|
||||
cfg,
|
||||
None,
|
||||
OnConnectData::default(),
|
||||
);
|
||||
pin!(h1);
|
||||
|
||||
lazy(|cx| {
|
||||
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
|
||||
|
||||
assert!(
|
||||
h1.as_mut().poll(cx).is_pending(),
|
||||
"keep-alive should prevent poll from resolving"
|
||||
);
|
||||
|
||||
// polls: initial
|
||||
assert_eq!(h1.poll_count, 1);
|
||||
|
||||
let mut res = buf.take_write_buf().to_vec();
|
||||
stabilize_date_header(&mut res);
|
||||
let res = &res[..];
|
||||
|
||||
let exp = b"\
|
||||
HTTP/1.1 200 OK\r\n\
|
||||
content-length: 5\r\n\
|
||||
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
|
||||
/abcd\
|
||||
";
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
exp,
|
||||
"\nexpected response not in write buffer:\n\
|
||||
response: {:?}\n\
|
||||
expected: {:?}",
|
||||
String::from_utf8_lossy(res),
|
||||
String::from_utf8_lossy(exp)
|
||||
);
|
||||
})
|
||||
.await;
|
||||
|
||||
// sleep slightly longer than keep-alive timeout
|
||||
sleep(Duration::from_millis(250)).await;
|
||||
|
||||
lazy(|cx| {
|
||||
assert!(
|
||||
h1.as_mut().poll(cx).is_ready(),
|
||||
"keep-alive should have resolved",
|
||||
);
|
||||
|
||||
// polls: initial => keep-alive wake-up => shutdown
|
||||
assert_eq!(h1.poll_count, 2);
|
||||
|
||||
if let DispatcherStateProj::Normal { inner } = h1.project().inner.project() {
|
||||
// connection closed
|
||||
assert!(inner.flags.contains(Flags::SHUTDOWN));
|
||||
assert!(inner.flags.contains(Flags::WRITE_DISCONNECT));
|
||||
// and nothing added to write buffer
|
||||
assert!(buf.write_buf_slice().is_empty());
|
||||
}
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn keep_alive_follow_up_req() {
|
||||
let mut buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
|
||||
|
||||
let cfg = ServiceConfig::new(
|
||||
KeepAlive::Timeout(Duration::from_millis(500)),
|
||||
Duration::from_millis(100),
|
||||
Duration::ZERO,
|
||||
false,
|
||||
None,
|
||||
);
|
||||
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
|
||||
|
||||
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
|
||||
buf.clone(),
|
||||
services,
|
||||
cfg,
|
||||
None,
|
||||
OnConnectData::default(),
|
||||
);
|
||||
pin!(h1);
|
||||
|
||||
lazy(|cx| {
|
||||
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
|
||||
|
||||
assert!(
|
||||
h1.as_mut().poll(cx).is_pending(),
|
||||
"keep-alive should prevent poll from resolving"
|
||||
);
|
||||
|
||||
// polls: initial
|
||||
assert_eq!(h1.poll_count, 1);
|
||||
|
||||
let mut res = buf.take_write_buf().to_vec();
|
||||
stabilize_date_header(&mut res);
|
||||
let res = &res[..];
|
||||
|
||||
let exp = b"\
|
||||
HTTP/1.1 200 OK\r\n\
|
||||
content-length: 5\r\n\
|
||||
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
|
||||
/abcd\
|
||||
";
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
exp,
|
||||
"\nexpected response not in write buffer:\n\
|
||||
response: {:?}\n\
|
||||
expected: {:?}",
|
||||
String::from_utf8_lossy(res),
|
||||
String::from_utf8_lossy(exp)
|
||||
);
|
||||
})
|
||||
.await;
|
||||
|
||||
// sleep for less than KA timeout
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
|
||||
lazy(|cx| {
|
||||
assert!(
|
||||
h1.as_mut().poll(cx).is_pending(),
|
||||
"keep-alive should not have resolved dispatcher yet",
|
||||
);
|
||||
|
||||
// polls: initial => manual
|
||||
assert_eq!(h1.poll_count, 2);
|
||||
|
||||
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
|
||||
// connection not closed
|
||||
assert!(!inner.flags.contains(Flags::SHUTDOWN));
|
||||
assert!(!inner.flags.contains(Flags::WRITE_DISCONNECT));
|
||||
// and nothing added to write buffer
|
||||
assert!(buf.write_buf_slice().is_empty());
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
lazy(|cx| {
|
||||
buf.extend_read_buf(
|
||||
"\
|
||||
GET /efg HTTP/1.1\r\n\
|
||||
Connection: close\r\n\
|
||||
\r\n\r\n",
|
||||
);
|
||||
|
||||
assert!(
|
||||
h1.as_mut().poll(cx).is_ready(),
|
||||
"connection close header should override keep-alive setting",
|
||||
);
|
||||
|
||||
// polls: initial => manual => follow-up req => shutdown
|
||||
assert_eq!(h1.poll_count, 4);
|
||||
|
||||
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
|
||||
// connection closed
|
||||
assert!(inner.flags.contains(Flags::SHUTDOWN));
|
||||
assert!(!inner.flags.contains(Flags::WRITE_DISCONNECT));
|
||||
}
|
||||
|
||||
let mut res = buf.take_write_buf().to_vec();
|
||||
stabilize_date_header(&mut res);
|
||||
let res = &res[..];
|
||||
|
||||
let exp = b"\
|
||||
HTTP/1.1 200 OK\r\n\
|
||||
content-length: 4\r\n\
|
||||
connection: close\r\n\
|
||||
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
|
||||
/efg\
|
||||
";
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
exp,
|
||||
"\nexpected response not in write buffer:\n\
|
||||
response: {:?}\n\
|
||||
expected: {:?}",
|
||||
String::from_utf8_lossy(res),
|
||||
String::from_utf8_lossy(exp)
|
||||
);
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn req_parse_err() {
|
||||
lazy(|cx| {
|
||||
let buf = TestBuffer::new("GET /test HTTP/1\r\n\r\n");
|
||||
|
||||
let services = HttpFlow::new(ok_service(), ExpectHandler, None);
|
||||
|
||||
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
|
||||
buf.clone(),
|
||||
services,
|
||||
ServiceConfig::default(),
|
||||
None,
|
||||
OnConnectData::default(),
|
||||
);
|
||||
|
||||
pin!(h1);
|
||||
|
||||
match h1.as_mut().poll(cx) {
|
||||
Poll::Pending => panic!(),
|
||||
Poll::Ready(res) => assert!(res.is_err()),
|
||||
}
|
||||
|
||||
if let DispatcherStateProj::Normal { inner } = h1.project().inner.project() {
|
||||
assert!(inner.flags.contains(Flags::READ_DISCONNECT));
|
||||
assert_eq!(
|
||||
&buf.write_buf_slice()[..26],
|
||||
b"HTTP/1.1 400 Bad Request\r\n"
|
||||
);
|
||||
}
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn pipelining_ok_then_ok() {
|
||||
lazy(|cx| {
|
||||
let buf = TestBuffer::new(
|
||||
"\
|
||||
GET /abcd HTTP/1.1\r\n\r\n\
|
||||
GET /def HTTP/1.1\r\n\r\n\
|
||||
",
|
||||
);
|
||||
|
||||
let cfg = ServiceConfig::new(
|
||||
KeepAlive::Disabled,
|
||||
Duration::from_millis(1),
|
||||
Duration::from_millis(1),
|
||||
false,
|
||||
None,
|
||||
);
|
||||
|
||||
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
|
||||
|
||||
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
|
||||
buf.clone(),
|
||||
services,
|
||||
cfg,
|
||||
None,
|
||||
OnConnectData::default(),
|
||||
);
|
||||
|
||||
pin!(h1);
|
||||
|
||||
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
|
||||
|
||||
match h1.as_mut().poll(cx) {
|
||||
Poll::Pending => panic!("first poll should not be pending"),
|
||||
Poll::Ready(res) => assert!(res.is_ok()),
|
||||
}
|
||||
|
||||
// polls: initial => shutdown
|
||||
assert_eq!(h1.poll_count, 2);
|
||||
|
||||
let mut res = buf.write_buf_slice_mut();
|
||||
stabilize_date_header(&mut res);
|
||||
let res = &res[..];
|
||||
|
||||
let exp = b"\
|
||||
HTTP/1.1 200 OK\r\n\
|
||||
content-length: 5\r\n\
|
||||
connection: close\r\n\
|
||||
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
|
||||
/abcd\
|
||||
HTTP/1.1 200 OK\r\n\
|
||||
content-length: 4\r\n\
|
||||
connection: close\r\n\
|
||||
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
|
||||
/def\
|
||||
";
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
exp,
|
||||
"\nexpected response not in write buffer:\n\
|
||||
response: {:?}\n\
|
||||
expected: {:?}",
|
||||
String::from_utf8_lossy(res),
|
||||
String::from_utf8_lossy(exp)
|
||||
);
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn pipelining_ok_then_bad() {
|
||||
lazy(|cx| {
|
||||
let buf = TestBuffer::new(
|
||||
"\
|
||||
GET /abcd HTTP/1.1\r\n\r\n\
|
||||
GET /def HTTP/1\r\n\r\n\
|
||||
",
|
||||
);
|
||||
|
||||
let cfg = ServiceConfig::new(
|
||||
KeepAlive::Disabled,
|
||||
Duration::from_millis(1),
|
||||
Duration::from_millis(1),
|
||||
false,
|
||||
None,
|
||||
);
|
||||
|
||||
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
|
||||
|
||||
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
|
||||
buf.clone(),
|
||||
services,
|
||||
cfg,
|
||||
None,
|
||||
OnConnectData::default(),
|
||||
);
|
||||
|
||||
pin!(h1);
|
||||
|
||||
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
|
||||
|
||||
match h1.as_mut().poll(cx) {
|
||||
Poll::Pending => panic!("first poll should not be pending"),
|
||||
Poll::Ready(res) => assert!(res.is_err()),
|
||||
}
|
||||
|
||||
// polls: initial => shutdown
|
||||
assert_eq!(h1.poll_count, 1);
|
||||
|
||||
let mut res = buf.write_buf_slice_mut();
|
||||
stabilize_date_header(&mut res);
|
||||
let res = &res[..];
|
||||
|
||||
let exp = b"\
|
||||
HTTP/1.1 200 OK\r\n\
|
||||
content-length: 5\r\n\
|
||||
connection: close\r\n\
|
||||
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
|
||||
/abcd\
|
||||
HTTP/1.1 400 Bad Request\r\n\
|
||||
content-length: 0\r\n\
|
||||
connection: close\r\n\
|
||||
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
|
||||
";
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
exp,
|
||||
"\nexpected response not in write buffer:\n\
|
||||
response: {:?}\n\
|
||||
expected: {:?}",
|
||||
String::from_utf8_lossy(res),
|
||||
String::from_utf8_lossy(exp)
|
||||
);
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn expect_handling() {
|
||||
lazy(|cx| {
|
||||
let mut buf = TestSeqBuffer::empty();
|
||||
let cfg = ServiceConfig::new(
|
||||
KeepAlive::Disabled,
|
||||
Duration::ZERO,
|
||||
Duration::ZERO,
|
||||
false,
|
||||
None,
|
||||
);
|
||||
|
||||
let services = HttpFlow::new(echo_payload_service(), ExpectHandler, None);
|
||||
|
||||
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
|
||||
buf.clone(),
|
||||
services,
|
||||
cfg,
|
||||
None,
|
||||
OnConnectData::default(),
|
||||
);
|
||||
|
||||
buf.extend_read_buf(
|
||||
"\
|
||||
POST /upload HTTP/1.1\r\n\
|
||||
Content-Length: 5\r\n\
|
||||
Expect: 100-continue\r\n\
|
||||
\r\n\
|
||||
",
|
||||
);
|
||||
|
||||
pin!(h1);
|
||||
|
||||
assert!(h1.as_mut().poll(cx).is_pending());
|
||||
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
|
||||
|
||||
// polls: manual
|
||||
assert_eq!(h1.poll_count, 1);
|
||||
|
||||
if let DispatcherState::Normal { ref inner } = h1.inner {
|
||||
let io = inner.io.as_ref().unwrap();
|
||||
let res = &io.write_buf()[..];
|
||||
assert_eq!(
|
||||
str::from_utf8(res).unwrap(),
|
||||
"HTTP/1.1 100 Continue\r\n\r\n"
|
||||
);
|
||||
}
|
||||
|
||||
buf.extend_read_buf("12345");
|
||||
assert!(h1.as_mut().poll(cx).is_ready());
|
||||
|
||||
// polls: manual => manual => shutdown
|
||||
assert_eq!(h1.poll_count, 3);
|
||||
|
||||
if let DispatcherState::Normal { ref inner } = h1.inner {
|
||||
let io = inner.io.as_ref().unwrap();
|
||||
let mut res = io.write_buf()[..].to_owned();
|
||||
stabilize_date_header(&mut res);
|
||||
|
||||
assert_eq!(
|
||||
str::from_utf8(&res).unwrap(),
|
||||
"\
|
||||
HTTP/1.1 100 Continue\r\n\
|
||||
\r\n\
|
||||
HTTP/1.1 200 OK\r\n\
|
||||
content-length: 5\r\n\
|
||||
connection: close\r\n\
|
||||
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
|
||||
\r\n\
|
||||
12345\
|
||||
"
|
||||
);
|
||||
}
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn expect_eager() {
|
||||
lazy(|cx| {
|
||||
let mut buf = TestSeqBuffer::empty();
|
||||
let cfg = ServiceConfig::new(
|
||||
KeepAlive::Disabled,
|
||||
Duration::ZERO,
|
||||
Duration::ZERO,
|
||||
false,
|
||||
None,
|
||||
);
|
||||
|
||||
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
|
||||
|
||||
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
|
||||
buf.clone(),
|
||||
services,
|
||||
cfg,
|
||||
None,
|
||||
OnConnectData::default(),
|
||||
);
|
||||
|
||||
buf.extend_read_buf(
|
||||
"\
|
||||
POST /upload HTTP/1.1\r\n\
|
||||
Content-Length: 5\r\n\
|
||||
Expect: 100-continue\r\n\
|
||||
\r\n\
|
||||
",
|
||||
);
|
||||
|
||||
pin!(h1);
|
||||
|
||||
assert!(h1.as_mut().poll(cx).is_ready());
|
||||
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
|
||||
|
||||
// polls: manual => shutdown
|
||||
assert_eq!(h1.poll_count, 2);
|
||||
|
||||
if let DispatcherState::Normal { ref inner } = h1.inner {
|
||||
let io = inner.io.as_ref().unwrap();
|
||||
let mut res = io.write_buf()[..].to_owned();
|
||||
stabilize_date_header(&mut res);
|
||||
|
||||
// Despite the content-length header and even though the request payload has not
|
||||
// been sent, this test expects a complete service response since the payload
|
||||
// is not used at all. The service passed to dispatcher is path echo and doesn't
|
||||
// consume payload bytes.
|
||||
assert_eq!(
|
||||
str::from_utf8(&res).unwrap(),
|
||||
"\
|
||||
HTTP/1.1 100 Continue\r\n\
|
||||
\r\n\
|
||||
HTTP/1.1 200 OK\r\n\
|
||||
content-length: 7\r\n\
|
||||
connection: close\r\n\
|
||||
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
|
||||
\r\n\
|
||||
/upload\
|
||||
"
|
||||
);
|
||||
}
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn upgrade_handling() {
|
||||
struct TestUpgrade;
|
||||
|
||||
impl<T> Service<(Request, Framed<T, Codec>)> for TestUpgrade {
|
||||
type Response = ();
|
||||
type Error = Error;
|
||||
type Future = Ready<Result<Self::Response, Self::Error>>;
|
||||
|
||||
actix_service::always_ready!();
|
||||
|
||||
fn call(&self, (req, _framed): (Request, Framed<T, Codec>)) -> Self::Future {
|
||||
assert_eq!(req.method(), Method::GET);
|
||||
assert!(req.upgrade());
|
||||
assert_eq!(req.headers().get("upgrade").unwrap(), "websocket");
|
||||
ready(Ok(()))
|
||||
}
|
||||
}
|
||||
|
||||
lazy(|cx| {
|
||||
let mut buf = TestSeqBuffer::empty();
|
||||
let cfg = ServiceConfig::new(
|
||||
KeepAlive::Disabled,
|
||||
Duration::ZERO,
|
||||
Duration::ZERO,
|
||||
false,
|
||||
None,
|
||||
);
|
||||
|
||||
let services = HttpFlow::new(ok_service(), ExpectHandler, Some(TestUpgrade));
|
||||
|
||||
let h1 = Dispatcher::<_, _, _, _, TestUpgrade>::new(
|
||||
buf.clone(),
|
||||
services,
|
||||
cfg,
|
||||
None,
|
||||
OnConnectData::default(),
|
||||
);
|
||||
|
||||
buf.extend_read_buf(
|
||||
"\
|
||||
GET /ws HTTP/1.1\r\n\
|
||||
Connection: Upgrade\r\n\
|
||||
Upgrade: websocket\r\n\
|
||||
\r\n\
|
||||
",
|
||||
);
|
||||
|
||||
pin!(h1);
|
||||
|
||||
assert!(h1.as_mut().poll(cx).is_ready());
|
||||
assert!(matches!(&h1.inner, DispatcherState::Upgrade { .. }));
|
||||
|
||||
// polls: manual => shutdown
|
||||
assert_eq!(h1.poll_count, 2);
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
// fix in #2624 reverted temporarily
|
||||
// complete fix tracked in #2745
|
||||
#[ignore]
|
||||
#[actix_rt::test]
|
||||
async fn handler_drop_payload() {
|
||||
let _ = env_logger::try_init();
|
||||
|
||||
let mut buf = TestBuffer::new(http_msg(
|
||||
r"
|
||||
POST /drop-payload HTTP/1.1
|
||||
Content-Length: 3
|
||||
|
||||
abc
|
||||
",
|
||||
));
|
||||
|
||||
let services = HttpFlow::new(
|
||||
drop_payload_service(),
|
||||
ExpectHandler,
|
||||
None::<UpgradeHandler>,
|
||||
);
|
||||
|
||||
let h1 = Dispatcher::new(
|
||||
buf.clone(),
|
||||
services,
|
||||
ServiceConfig::default(),
|
||||
None,
|
||||
OnConnectData::default(),
|
||||
);
|
||||
pin!(h1);
|
||||
|
||||
lazy(|cx| {
|
||||
assert!(h1.as_mut().poll(cx).is_pending());
|
||||
|
||||
// polls: manual
|
||||
assert_eq!(h1.poll_count, 1);
|
||||
|
||||
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
|
||||
stabilize_date_header(&mut res);
|
||||
let res = &res[..];
|
||||
|
||||
let exp = http_msg(
|
||||
r"
|
||||
HTTP/1.1 200 OK
|
||||
content-length: 15
|
||||
date: Thu, 01 Jan 1970 12:34:56 UTC
|
||||
|
||||
payload dropped
|
||||
",
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
exp,
|
||||
"\nexpected response not in write buffer:\n\
|
||||
response: {:?}\n\
|
||||
expected: {:?}",
|
||||
String::from_utf8_lossy(res),
|
||||
String::from_utf8_lossy(&exp)
|
||||
);
|
||||
|
||||
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
|
||||
assert!(inner.state.is_none());
|
||||
}
|
||||
})
|
||||
.await;
|
||||
|
||||
lazy(|cx| {
|
||||
// add message that claims to have payload longer than provided
|
||||
buf.extend_read_buf(http_msg(
|
||||
r"
|
||||
POST /drop-payload HTTP/1.1
|
||||
Content-Length: 200
|
||||
|
||||
abc
|
||||
",
|
||||
));
|
||||
|
||||
assert!(h1.as_mut().poll(cx).is_pending());
|
||||
|
||||
// polls: manual => manual
|
||||
assert_eq!(h1.poll_count, 2);
|
||||
|
||||
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
|
||||
stabilize_date_header(&mut res);
|
||||
let res = &res[..];
|
||||
|
||||
// expect response immediately even though request side has not finished reading payload
|
||||
let exp = http_msg(
|
||||
r"
|
||||
HTTP/1.1 200 OK
|
||||
content-length: 15
|
||||
date: Thu, 01 Jan 1970 12:34:56 UTC
|
||||
|
||||
payload dropped
|
||||
",
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
exp,
|
||||
"\nexpected response not in write buffer:\n\
|
||||
response: {:?}\n\
|
||||
expected: {:?}",
|
||||
String::from_utf8_lossy(res),
|
||||
String::from_utf8_lossy(&exp)
|
||||
);
|
||||
})
|
||||
.await;
|
||||
|
||||
lazy(|cx| {
|
||||
assert!(h1.as_mut().poll(cx).is_ready());
|
||||
|
||||
// polls: manual => manual => manual
|
||||
assert_eq!(h1.poll_count, 3);
|
||||
|
||||
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
|
||||
stabilize_date_header(&mut res);
|
||||
let res = &res[..];
|
||||
|
||||
// expect that unrequested error response is sent back since connection could not be cleaned
|
||||
let exp = http_msg(
|
||||
r"
|
||||
HTTP/1.1 500 Internal Server Error
|
||||
content-length: 0
|
||||
connection: close
|
||||
date: Thu, 01 Jan 1970 12:34:56 UTC
|
||||
|
||||
",
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
res,
|
||||
exp,
|
||||
"\nexpected response not in write buffer:\n\
|
||||
response: {:?}\n\
|
||||
expected: {:?}",
|
||||
String::from_utf8_lossy(res),
|
||||
String::from_utf8_lossy(&exp)
|
||||
);
|
||||
})
|
||||
.await;
|
||||
}
|
||||
|
||||
fn http_msg(msg: impl AsRef<str>) -> BytesMut {
|
||||
let mut msg = msg
|
||||
.as_ref()
|
||||
.trim()
|
||||
.split('\n')
|
||||
.map(|line| [line.trim_start(), "\r"].concat())
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
|
||||
// remove trailing \r
|
||||
msg.pop();
|
||||
|
||||
if !msg.is_empty() && !msg.contains("\r\n\r\n") {
|
||||
msg.push_str("\r\n\r\n");
|
||||
}
|
||||
|
||||
BytesMut::from(msg.as_bytes())
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn http_msg_creates_msg() {
|
||||
assert_eq!(http_msg(r""), "");
|
||||
|
||||
assert_eq!(
|
||||
http_msg(
|
||||
r"
|
||||
POST / HTTP/1.1
|
||||
Content-Length: 3
|
||||
|
||||
abc
|
||||
"
|
||||
),
|
||||
"POST / HTTP/1.1\r\nContent-Length: 3\r\n\r\nabc"
|
||||
);
|
||||
|
||||
assert_eq!(
|
||||
http_msg(
|
||||
r"
|
||||
GET / HTTP/1.1
|
||||
Content-Length: 3
|
||||
|
||||
"
|
||||
),
|
||||
"GET / HTTP/1.1\r\nContent-Length: 3\r\n\r\n"
|
||||
);
|
||||
}
|
@ -152,7 +152,6 @@ pub(crate) trait MessageType: Sized {
|
||||
let k = key.as_str().as_bytes();
|
||||
let k_len = k.len();
|
||||
|
||||
// TODO: drain?
|
||||
for val in value.iter() {
|
||||
let v = val.as_ref();
|
||||
let v_len = v.len();
|
||||
@ -211,14 +210,14 @@ pub(crate) trait MessageType: Sized {
|
||||
dst.advance_mut(pos);
|
||||
}
|
||||
|
||||
// optimized date header, set_date writes \r\n
|
||||
if !has_date {
|
||||
config.set_date(dst, camel_case);
|
||||
} else {
|
||||
// msg eof
|
||||
dst.extend_from_slice(b"\r\n");
|
||||
// optimized date header, write_date_header writes its own \r\n
|
||||
config.write_date_header(dst, camel_case);
|
||||
}
|
||||
|
||||
// end-of-headers marker
|
||||
dst.extend_from_slice(b"\r\n");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@ -319,16 +318,17 @@ impl MessageType for RequestHeadType {
|
||||
}
|
||||
|
||||
impl<T: MessageType> MessageEncoder<T> {
|
||||
/// Encode message
|
||||
/// Encode chunk.
|
||||
pub fn encode_chunk(&mut self, msg: &[u8], buf: &mut BytesMut) -> io::Result<bool> {
|
||||
self.te.encode(msg, buf)
|
||||
}
|
||||
|
||||
/// Encode eof
|
||||
/// Encode EOF.
|
||||
pub fn encode_eof(&mut self, buf: &mut BytesMut) -> io::Result<()> {
|
||||
self.te.encode_eof(buf)
|
||||
}
|
||||
|
||||
/// Encode message.
|
||||
pub fn encode(
|
||||
&mut self,
|
||||
dst: &mut BytesMut,
|
||||
@ -450,7 +450,7 @@ impl TransferEncoding {
|
||||
|
||||
buf.extend_from_slice(&msg[..len as usize]);
|
||||
|
||||
*remaining -= len as u64;
|
||||
*remaining -= len;
|
||||
Ok(*remaining == 0)
|
||||
} else {
|
||||
Ok(true)
|
||||
@ -517,6 +517,7 @@ unsafe fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) {
|
||||
if let Some(c @ b'a'..=b'z') = iter.next() {
|
||||
buffer[index] = c & 0b1101_1111;
|
||||
}
|
||||
index += 1;
|
||||
}
|
||||
|
||||
index += 1;
|
||||
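A worked example of the bit trick in the hunk above: clearing bit 0x20 (0b0010_0000) maps ASCII 'a'..='z' onto 'A'..='Z', which is how the camel-case writer uppercases the first letter of each header segment.

    fn main() {
        assert_eq!(b'h' & 0b1101_1111, b'H');
        assert_eq!(b'z' & 0b1101_1111, b'Z');
        // already-uppercase bytes are left unchanged
        assert_eq!(b'H' & 0b1101_1111, b'H');
    }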
@ -528,7 +529,7 @@ mod tests {
|
||||
use std::rc::Rc;
|
||||
|
||||
use bytes::Bytes;
|
||||
use http::header::AUTHORIZATION;
|
||||
use http::header::{AUTHORIZATION, UPGRADE_INSECURE_REQUESTS};
|
||||
|
||||
use super::*;
|
||||
use crate::{
|
||||
@ -559,6 +560,9 @@ mod tests {
|
||||
head.headers
|
||||
.insert(CONTENT_TYPE, HeaderValue::from_static("plain/text"));
|
||||
|
||||
head.headers
|
||||
.insert(UPGRADE_INSECURE_REQUESTS, HeaderValue::from_static("1"));
|
||||
|
||||
let mut head = RequestHeadType::Owned(head);
|
||||
|
||||
let _ = head.encode_headers(
|
||||
@ -574,6 +578,7 @@ mod tests {
|
||||
assert!(data.contains("Connection: close\r\n"));
|
||||
assert!(data.contains("Content-Type: plain/text\r\n"));
|
||||
assert!(data.contains("Date: date\r\n"));
|
||||
assert!(data.contains("Upgrade-Insecure-Requests: 1\r\n"));
|
||||
|
||||
let _ = head.encode_headers(
|
||||
&mut bytes,
|
||||
|
@ -7,10 +7,13 @@ mod client;
|
||||
mod codec;
|
||||
mod decoder;
|
||||
mod dispatcher;
|
||||
#[cfg(test)]
|
||||
mod dispatcher_tests;
|
||||
mod encoder;
|
||||
mod expect;
|
||||
mod payload;
|
||||
mod service;
|
||||
mod timer;
|
||||
mod upgrade;
|
||||
mod utils;
|
||||
|
||||
@ -26,9 +29,10 @@ pub use self::utils::SendResponse;
|
||||
#[derive(Debug)]
|
||||
/// Codec message
|
||||
pub enum Message<T> {
|
||||
/// Http message
|
||||
/// HTTP message.
|
||||
Item(T),
|
||||
/// Payload chunk
|
||||
|
||||
/// Payload chunk.
|
||||
Chunk(Option<Bytes>),
|
||||
}
|
||||
|
||||
|
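A consumer of the codec typically matches on both variants; a hypothetical sketch (`handle_request`, `body_buf`, and `finish_body` are illustrative names, not part of this diff):

match msg {
    // a complete message head was decoded
    Message::Item(req) => handle_request(req),
    // a chunk of the payload body
    Message::Chunk(Some(chunk)) => body_buf.extend_from_slice(&chunk),
    // end of payload
    Message::Chunk(None) => finish_body(),
}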
@ -16,7 +16,7 @@ use crate::error::PayloadError;
|
||||
/// max buffer size 32k
|
||||
pub(crate) const MAX_BUFFER_SIZE: usize = 32_768;
|
||||
|
||||
#[derive(Debug, PartialEq)]
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub enum PayloadStatus {
|
||||
Read,
|
||||
Pause,
|
||||
@ -252,18 +252,15 @@ impl Inner {
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::panic::{RefUnwindSafe, UnwindSafe};
|
||||
|
||||
use actix_utils::future::poll_fn;
|
||||
use static_assertions::{assert_impl_all, assert_not_impl_any};
|
||||
|
||||
use super::*;
|
||||
|
||||
assert_impl_all!(Payload: Unpin);
|
||||
assert_not_impl_any!(Payload: Send, Sync, UnwindSafe, RefUnwindSafe);
|
||||
assert_not_impl_any!(Payload: Send, Sync);
|
||||
|
||||
assert_impl_all!(Inner: Unpin, Send, Sync);
|
||||
assert_not_impl_any!(Inner: UnwindSafe, RefUnwindSafe);
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn test_unread_data() {
|
||||
|
@ -13,6 +13,7 @@ use actix_service::{
|
||||
};
|
||||
use actix_utils::future::ready;
|
||||
use futures_core::future::LocalBoxFuture;
|
||||
use tracing::error;
|
||||
|
||||
use crate::{
|
||||
body::{BoxBody, MessageBody},
|
||||
@ -133,6 +134,7 @@ mod openssl {
|
||||
U::InitError: fmt::Debug,
|
||||
{
|
||||
/// Create OpenSSL based service.
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "openssl")))]
|
||||
pub fn openssl(
|
||||
self,
|
||||
acceptor: SslAcceptor,
|
||||
@ -195,6 +197,7 @@ mod rustls {
|
||||
U::InitError: fmt::Debug,
|
||||
{
|
||||
/// Create Rustls based service.
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "rustls")))]
|
||||
pub fn rustls(
|
||||
self,
|
||||
config: ServerConfig,
|
||||
@ -305,13 +308,13 @@ where
|
||||
Box::pin(async move {
|
||||
let expect = expect
|
||||
.await
|
||||
.map_err(|e| log::error!("Init http expect service error: {:?}", e))?;
|
||||
.map_err(|e| error!("Init http expect service error: {:?}", e))?;
|
||||
|
||||
let upgrade = match upgrade {
|
||||
Some(upgrade) => {
|
||||
let upgrade = upgrade
|
||||
.await
|
||||
.map_err(|e| log::error!("Init http upgrade service error: {:?}", e))?;
|
||||
.map_err(|e| error!("Init http upgrade service error: {:?}", e))?;
|
||||
Some(upgrade)
|
||||
}
|
||||
None => None,
|
||||
@ -319,7 +322,7 @@ where
|
||||
|
||||
let service = service
|
||||
.await
|
||||
.map_err(|e| log::error!("Init http service error: {:?}", e))?;
|
||||
.map_err(|e| error!("Init http service error: {:?}", e))?;
|
||||
|
||||
Ok(H1ServiceHandler::new(
|
||||
cfg,
|
||||
@ -357,7 +360,7 @@ where
|
||||
|
||||
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self._poll_ready(cx).map_err(|err| {
|
||||
log::error!("HTTP/1 service readiness error: {:?}", err);
|
||||
error!("HTTP/1 service readiness error: {:?}", err);
|
||||
DispatchError::Service(err)
|
||||
})
|
||||
}
|
||||
|
81
actix-http/src/h1/timer.rs
Normal file
@ -0,0 +1,81 @@
|
||||
use std::{fmt, future::Future, pin::Pin, task::Context};
|
||||
|
||||
use actix_rt::time::{Instant, Sleep};
|
||||
use tracing::trace;
|
||||
|
||||
#[derive(Debug)]
|
||||
pub(super) enum TimerState {
|
||||
Disabled,
|
||||
Inactive,
|
||||
Active { timer: Pin<Box<Sleep>> },
|
||||
}
|
||||
|
||||
impl TimerState {
|
||||
pub(super) fn new(enabled: bool) -> Self {
|
||||
if enabled {
|
||||
Self::Inactive
|
||||
} else {
|
||||
Self::Disabled
|
||||
}
|
||||
}
|
||||
|
||||
pub(super) fn is_enabled(&self) -> bool {
|
||||
matches!(self, Self::Active { .. } | Self::Inactive)
|
||||
}
|
||||
|
||||
pub(super) fn set(&mut self, timer: Sleep, line: u32) {
|
||||
if matches!(self, Self::Disabled) {
|
||||
trace!("setting disabled timer from line {}", line);
|
||||
}
|
||||
|
||||
*self = Self::Active {
|
||||
timer: Box::pin(timer),
|
||||
};
|
||||
}
|
||||
|
||||
pub(super) fn set_and_init(&mut self, cx: &mut Context<'_>, timer: Sleep, line: u32) {
|
||||
self.set(timer, line);
|
||||
self.init(cx);
|
||||
}
|
||||
|
||||
pub(super) fn clear(&mut self, line: u32) {
|
||||
if matches!(self, Self::Disabled) {
|
||||
trace!("trying to clear a disabled timer from line {}", line);
|
||||
}
|
||||
|
||||
if matches!(self, Self::Inactive) {
|
||||
trace!("trying to clear an inactive timer from line {}", line);
|
||||
}
|
||||
|
||||
*self = Self::Inactive;
|
||||
}
|
||||
|
||||
pub(super) fn init(&mut self, cx: &mut Context<'_>) {
|
||||
if let TimerState::Active { timer } = self {
|
||||
let _ = timer.as_mut().poll(cx);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for TimerState {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
match self {
|
||||
TimerState::Disabled => f.write_str("timer is disabled"),
|
||||
TimerState::Inactive => f.write_str("timer is inactive"),
|
||||
TimerState::Active { timer } => {
|
||||
let deadline = timer.deadline();
|
||||
let now = Instant::now();
|
||||
|
||||
if deadline < now {
|
||||
f.write_str("timer is active and has reached deadline")
|
||||
} else {
|
||||
write!(
|
||||
f,
|
||||
"timer is active and due to expire in {} milliseconds",
|
||||
((deadline - now).as_secs_f32() * 1000.0)
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
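For orientation, a caller such as the HTTP/1 dispatcher would arm this state when a keep-alive deadline exists and disarm it once a response completes; a hypothetical sketch using names that appear elsewhere in this diff (the `this.ka_timer` field and the poll context `cx` are assumptions, not shown here):

// arm the keep-alive timer and register the waker in one step
if let Some(deadline) = this.config.keep_alive_deadline() {
    this.ka_timer.set_and_init(cx, sleep_until(deadline.into()), line!());
}

// ...later, once the response has been fully written, return the slot to inactive
this.ka_timer.clear(line!());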
@ -19,15 +19,17 @@ use h2::{
|
||||
server::{Connection, SendResponse},
|
||||
Ping, PingPong,
|
||||
};
|
||||
use log::{error, trace};
|
||||
use pin_project_lite::pin_project;
|
||||
use tracing::{error, trace, warn};
|
||||
|
||||
use crate::{
|
||||
body::{BodySize, BoxBody, MessageBody},
|
||||
config::ServiceConfig,
|
||||
header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING},
|
||||
header::{
|
||||
HeaderName, HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING, UPGRADE,
|
||||
},
|
||||
service::HttpFlow,
|
||||
Extensions, OnConnectData, Payload, Request, Response, ResponseHead,
|
||||
Extensions, Method, OnConnectData, Payload, Request, Response, ResponseHead,
|
||||
};
|
||||
|
||||
const CHUNK_SIZE: usize = 16_384;
|
||||
@ -57,15 +59,15 @@ where
|
||||
conn_data: OnConnectData,
|
||||
timer: Option<Pin<Box<Sleep>>>,
|
||||
) -> Self {
|
||||
let ping_pong = config.keep_alive().map(|dur| H2PingPong {
|
||||
let ping_pong = config.keep_alive().duration().map(|dur| H2PingPong {
|
||||
timer: timer
|
||||
.map(|mut timer| {
|
||||
// reset timer if it's received from new function.
|
||||
timer.as_mut().reset(config.now() + dur);
|
||||
// reuse timer slot if it was initialized for handshake
|
||||
timer.as_mut().reset((config.now() + dur).into());
|
||||
timer
|
||||
})
|
||||
.unwrap_or_else(|| Box::pin(sleep(dur))),
|
||||
on_flight: false,
|
||||
in_flight: false,
|
||||
ping_pong: conn.ping_pong().unwrap(),
|
||||
});
|
||||
|
||||
@ -82,9 +84,14 @@ where
|
||||
}
|
||||
|
||||
struct H2PingPong {
|
||||
timer: Pin<Box<Sleep>>,
|
||||
on_flight: bool,
|
||||
/// Handle to send ping frames from the peer.
|
||||
ping_pong: PingPong,
|
||||
|
||||
/// True when a ping has been sent and is waiting for a reply.
|
||||
in_flight: bool,
|
||||
|
||||
/// Timeout for pong response.
|
||||
timer: Pin<Box<Sleep>>,
|
||||
}
|
||||
|
||||
impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U>
|
||||
@ -111,6 +118,7 @@ where
|
||||
let payload = crate::h2::Payload::new(body);
|
||||
let pl = Payload::H2 { payload };
|
||||
let mut req = Request::with_payload(pl);
|
||||
let head_req = parts.method == Method::HEAD;
|
||||
|
||||
let head = req.head_mut();
|
||||
head.uri = parts.uri;
|
||||
@ -128,10 +136,10 @@ where
|
||||
actix_rt::spawn(async move {
|
||||
// resolve service call and send response.
|
||||
let res = match fut.await {
|
||||
Ok(res) => handle_response(res.into(), tx, config).await,
|
||||
Ok(res) => handle_response(res.into(), tx, config, head_req).await,
|
||||
Err(err) => {
|
||||
let res: Response<BoxBody> = err.into();
|
||||
handle_response(res, tx, config).await
|
||||
handle_response(res, tx, config, head_req).await
|
||||
}
|
||||
};
|
||||
|
||||
@ -150,34 +158,36 @@ where
|
||||
});
|
||||
}
|
||||
Poll::Ready(None) => return Poll::Ready(Ok(())),
|
||||
|
||||
Poll::Pending => match this.ping_pong.as_mut() {
|
||||
Some(ping_pong) => loop {
|
||||
if ping_pong.on_flight {
|
||||
// When have on flight ping pong. poll pong and and keep alive timer.
|
||||
// on success pong received update keep alive timer to determine the next timing of
|
||||
// ping pong.
|
||||
if ping_pong.in_flight {
|
||||
// When there is an in-flight ping-pong, poll the pong and the keep-alive
|
||||
// timer. On a successful pong, update the keep-alive timer to determine
|
||||
// the next ping-pong timing.
|
||||
match ping_pong.ping_pong.poll_pong(cx)? {
|
||||
Poll::Ready(_) => {
|
||||
ping_pong.on_flight = false;
|
||||
ping_pong.in_flight = false;
|
||||
|
||||
let dead_line = this.config.keep_alive_expire().unwrap();
|
||||
ping_pong.timer.as_mut().reset(dead_line);
|
||||
let dead_line = this.config.keep_alive_deadline().unwrap();
|
||||
ping_pong.timer.as_mut().reset(dead_line.into());
|
||||
}
|
||||
Poll::Pending => {
|
||||
return ping_pong.timer.as_mut().poll(cx).map(|_| Ok(()))
|
||||
return ping_pong.timer.as_mut().poll(cx).map(|_| Ok(()));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// When there is no on flight ping pong. keep alive timer is used to wait for next
|
||||
// timing of ping pong. Therefore at this point it serves as an interval instead.
|
||||
// When there is no in-flight ping-pong, the keep-alive timer is used to
|
||||
// wait for the next ping-pong timing; therefore, at this point it serves
|
||||
// as an interval instead.
|
||||
ready!(ping_pong.timer.as_mut().poll(cx));
|
||||
|
||||
ping_pong.ping_pong.send_ping(Ping::opaque())?;
|
||||
|
||||
let dead_line = this.config.keep_alive_expire().unwrap();
|
||||
ping_pong.timer.as_mut().reset(dead_line);
|
||||
let dead_line = this.config.keep_alive_deadline().unwrap();
|
||||
ping_pong.timer.as_mut().reset(dead_line.into());
|
||||
|
||||
ping_pong.on_flight = true;
|
||||
ping_pong.in_flight = true;
|
||||
}
|
||||
},
|
||||
None => return Poll::Pending,
|
||||
@ -197,6 +207,7 @@ async fn handle_response<B>(
|
||||
res: Response<B>,
|
||||
mut tx: SendResponse<Bytes>,
|
||||
config: ServiceConfig,
|
||||
head_req: bool,
|
||||
) -> Result<(), DispatchError>
|
||||
where
|
||||
B: MessageBody,
|
||||
@ -206,14 +217,14 @@ where
|
||||
// prepare response.
|
||||
let mut size = body.size();
|
||||
let res = prepare_response(config, res.head(), &mut size);
|
||||
let eof = size.is_eof();
|
||||
let eof_or_head = size.is_eof() || head_req;
|
||||
|
||||
// send response head and return on eof.
|
||||
let mut stream = tx
|
||||
.send_response(res, eof)
|
||||
.send_response(res, eof_or_head)
|
||||
.map_err(DispatchError::SendResponse)?;
|
||||
|
||||
if eof {
|
||||
if eof_or_head {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
@ -285,13 +296,13 @@ fn prepare_response(
|
||||
_ => {}
|
||||
}
|
||||
|
||||
let _ = match size {
|
||||
BodySize::None | BodySize::Stream => None,
|
||||
match size {
|
||||
BodySize::None | BodySize::Stream => {}
|
||||
|
||||
BodySize::Sized(0) => {
|
||||
#[allow(clippy::declare_interior_mutable_const)]
|
||||
const HV_ZERO: HeaderValue = HeaderValue::from_static("0");
|
||||
res.headers_mut().insert(CONTENT_LENGTH, HV_ZERO)
|
||||
res.headers_mut().insert(CONTENT_LENGTH, HV_ZERO);
|
||||
}
|
||||
|
||||
BodySize::Sized(len) => {
|
||||
@ -300,19 +311,28 @@ fn prepare_response(
|
||||
res.headers_mut().insert(
|
||||
CONTENT_LENGTH,
|
||||
HeaderValue::from_str(buf.format(*len)).unwrap(),
|
||||
)
|
||||
);
|
||||
}
|
||||
};
|
||||
|
||||
// copy headers
|
||||
for (key, value) in head.headers.iter() {
|
||||
match *key {
|
||||
// TODO: consider skipping other headers according to:
|
||||
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
|
||||
// omit HTTP/1.x only headers
|
||||
CONNECTION | TRANSFER_ENCODING => continue,
|
||||
CONTENT_LENGTH if skip_len => continue,
|
||||
DATE => has_date = true,
|
||||
match key {
|
||||
// omit HTTP/1.x only headers according to:
|
||||
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
|
||||
&CONNECTION | &TRANSFER_ENCODING | &UPGRADE => continue,
|
||||
|
||||
&CONTENT_LENGTH if skip_len => continue,
|
||||
&DATE => has_date = true,
|
||||
|
||||
// omit HTTP/1.x only headers according to:
|
||||
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
|
||||
hdr if hdr == HeaderName::from_static("keep-alive")
|
||||
|| hdr == HeaderName::from_static("proxy-connection") =>
|
||||
{
|
||||
continue
|
||||
}
|
||||
|
||||
_ => {}
|
||||
}
|
||||
|
||||
@ -322,7 +342,7 @@ fn prepare_response(
|
||||
// set date header
|
||||
if !has_date {
|
||||
let mut bytes = BytesMut::with_capacity(29);
|
||||
config.set_date_header(&mut bytes);
|
||||
config.write_date_header_value(&mut bytes);
|
||||
res.headers_mut().insert(
|
||||
DATE,
|
||||
// SAFETY: serialized date-times are known ASCII strings
|
||||
|
@ -7,7 +7,7 @@ use std::{
|
||||
};
|
||||
|
||||
use actix_codec::{AsyncRead, AsyncWrite};
|
||||
use actix_rt::time::Sleep;
|
||||
use actix_rt::time::{sleep_until, Sleep};
|
||||
use bytes::Bytes;
|
||||
use futures_core::{ready, Stream};
|
||||
use h2::{
|
||||
@ -15,17 +15,17 @@ use h2::{
|
||||
RecvStream,
|
||||
};
|
||||
|
||||
use crate::{
|
||||
config::ServiceConfig,
|
||||
error::{DispatchError, PayloadError},
|
||||
};
|
||||
|
||||
mod dispatcher;
|
||||
mod service;
|
||||
|
||||
pub use self::dispatcher::Dispatcher;
|
||||
pub use self::service::H2Service;
|
||||
|
||||
use crate::{
|
||||
config::ServiceConfig,
|
||||
error::{DispatchError, PayloadError},
|
||||
};
|
||||
|
||||
/// HTTP/2 peer stream.
|
||||
pub struct Payload {
|
||||
stream: RecvStream,
|
||||
@ -67,7 +67,9 @@ where
|
||||
{
|
||||
HandshakeWithTimeout {
|
||||
handshake: handshake(io),
|
||||
timer: config.client_timer().map(Box::pin),
|
||||
timer: config
|
||||
.client_request_deadline()
|
||||
.map(|deadline| Box::pin(sleep_until(deadline.into()))),
|
||||
}
|
||||
}
|
||||
|
||||
@ -86,7 +88,7 @@ where
|
||||
let this = self.get_mut();
|
||||
|
||||
match Pin::new(&mut this.handshake).poll(cx)? {
|
||||
// return the timer on success handshake. It can be re-used for h2 ping-pong.
|
||||
// return the timer on successful handshake; its slot can be re-used for h2 ping-pong
|
||||
Poll::Ready(conn) => Poll::Ready(Ok((conn, this.timer.take()))),
|
||||
Poll::Pending => match this.timer.as_mut() {
|
||||
Some(timer) => {
|
||||
@ -101,11 +103,9 @@ where
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::panic::{RefUnwindSafe, UnwindSafe};
|
||||
|
||||
use static_assertions::assert_impl_all;
|
||||
|
||||
use super::*;
|
||||
|
||||
assert_impl_all!(Payload: Unpin, Send, Sync, UnwindSafe, RefUnwindSafe);
|
||||
assert_impl_all!(Payload: Unpin, Send, Sync);
|
||||
}
|
||||
|
@ -14,7 +14,7 @@ use actix_service::{
|
||||
};
|
||||
use actix_utils::future::ready;
|
||||
use futures_core::{future::LocalBoxFuture, ready};
|
||||
use log::error;
|
||||
use tracing::{error, trace};
|
||||
|
||||
use crate::{
|
||||
body::{BoxBody, MessageBody},
|
||||
@ -117,6 +117,7 @@ mod openssl {
|
||||
B: MessageBody + 'static,
|
||||
{
|
||||
/// Create OpenSSL based service.
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "openssl")))]
|
||||
pub fn openssl(
|
||||
self,
|
||||
acceptor: SslAcceptor,
|
||||
@ -164,6 +165,7 @@ mod rustls {
|
||||
B: MessageBody + 'static,
|
||||
{
|
||||
/// Create Rustls based service.
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "rustls")))]
|
||||
pub fn rustls(
|
||||
self,
|
||||
mut config: ServerConfig,
|
||||
|
53
actix-http/src/header/common.rs
Normal file
@ -0,0 +1,53 @@
|
||||
//! Common header names not defined in [`http`].
|
||||
//!
|
||||
//! Any headers added to this file will need to be re-exported from the list at `crate::headers`.
|
||||
|
||||
use http::header::HeaderName;
|
||||
|
||||
/// Response header field that indicates how caches have handled that response and its corresponding
|
||||
/// request.
|
||||
///
|
||||
/// See [RFC 9211](https://www.rfc-editor.org/rfc/rfc9211) for full semantics.
|
||||
// TODO(breaking): replace with http's version
|
||||
pub const CACHE_STATUS: HeaderName = HeaderName::from_static("cache-status");
|
||||
|
||||
/// Response header field that allows origin servers to control the behavior of CDN caches
|
||||
/// interposed between them and clients separately from other caches that might handle the response.
|
||||
///
|
||||
/// See [RFC 9213](https://www.rfc-editor.org/rfc/rfc9213) for full semantics.
|
||||
// TODO(breaking): replace with http's version
|
||||
pub const CDN_CACHE_CONTROL: HeaderName = HeaderName::from_static("cdn-cache-control");
|
||||
|
||||
/// Response header that prevents a document from loading any cross-origin resources that don't
|
||||
/// explicitly grant the document permission (using [CORP] or [CORS]).
|
||||
///
|
||||
/// [CORP]: https://developer.mozilla.org/en-US/docs/Web/HTTP/Cross-Origin_Resource_Policy_(CORP)
|
||||
/// [CORS]: https://developer.mozilla.org/en-US/docs/Web/HTTP/CORS
|
||||
pub const CROSS_ORIGIN_EMBEDDER_POLICY: HeaderName =
|
||||
HeaderName::from_static("cross-origin-embedder-policy");
|
||||
|
||||
/// Response header that allows you to ensure a top-level document does not share a browsing context
|
||||
/// group with cross-origin documents.
|
||||
pub const CROSS_ORIGIN_OPENER_POLICY: HeaderName =
|
||||
HeaderName::from_static("cross-origin-opener-policy");
|
||||
|
||||
/// Response header that conveys a desire that the browser blocks no-cors cross-origin/cross-site
|
||||
/// requests to the given resource.
|
||||
pub const CROSS_ORIGIN_RESOURCE_POLICY: HeaderName =
|
||||
HeaderName::from_static("cross-origin-resource-policy");
|
||||
|
||||
/// Response header that provides a mechanism to allow and deny the use of browser features in a
|
||||
/// document or within any `<iframe>` elements in the document.
|
||||
pub const PERMISSIONS_POLICY: HeaderName = HeaderName::from_static("permissions-policy");
|
||||
|
||||
/// Request header (de-facto standard) for identifying the originating IP address of a client
|
||||
/// connecting to a web server through a proxy server.
|
||||
pub const X_FORWARDED_FOR: HeaderName = HeaderName::from_static("x-forwarded-for");
|
||||
|
||||
/// Request header (de-facto standard) for identifying the original host requested by the client in
|
||||
/// the `Host` HTTP request header.
|
||||
pub const X_FORWARDED_HOST: HeaderName = HeaderName::from_static("x-forwarded-host");
|
||||
|
||||
/// Request header (de-facto standard) for identifying the protocol that a client used to connect to
|
||||
/// your proxy or load balancer.
|
||||
pub const X_FORWARDED_PROTO: HeaderName = HeaderName::from_static("x-forwarded-proto");
|
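These constants plug straight into the existing `HeaderMap`/`HeaderValue` APIs; a small illustration with made-up values (the re-exports are added later in this diff):

use actix_http::header::{self, HeaderMap, HeaderValue};

let mut map = HeaderMap::new();
map.insert(header::X_FORWARDED_PROTO, HeaderValue::from_static("https"));
map.insert(header::X_FORWARDED_FOR, HeaderValue::from_static("203.0.113.7"));

assert!(map.contains_key(&header::X_FORWARDED_PROTO));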
@ -150,9 +150,7 @@ impl HeaderMap {
|
||||
/// assert_eq!(map.len(), 3);
|
||||
/// ```
|
||||
pub fn len(&self) -> usize {
|
||||
self.inner
|
||||
.iter()
|
||||
.fold(0, |acc, (_, values)| acc + values.len())
|
||||
self.inner.values().map(|vals| vals.len()).sum()
|
||||
}
|
||||
|
||||
/// Returns the number of _keys_ stored in the map.
|
||||
@ -309,7 +307,7 @@ impl HeaderMap {
|
||||
pub fn get_all(&self, key: impl AsHeaderName) -> std::slice::Iter<'_, HeaderValue> {
|
||||
match self.get_value(key) {
|
||||
Some(value) => value.iter(),
|
||||
None => (&[]).iter(),
|
||||
None => [].iter(),
|
||||
}
|
||||
}
|
||||
|
||||
@ -552,6 +550,39 @@ impl HeaderMap {
|
||||
Keys(self.inner.keys())
|
||||
}
|
||||
|
||||
/// Retains only the headers specified by the predicate.
|
||||
///
|
||||
/// In other words, removes all headers `(name, val)` for which `retain_fn(&name, &mut val)`
|
||||
/// returns false.
|
||||
///
|
||||
/// The order in which headers are visited should be considered arbitrary.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// # use actix_http::header::{self, HeaderMap, HeaderValue};
|
||||
/// let mut map = HeaderMap::new();
|
||||
///
|
||||
/// map.append(header::HOST, HeaderValue::from_static("duck.com"));
|
||||
/// map.append(header::SET_COOKIE, HeaderValue::from_static("one=1"));
|
||||
/// map.append(header::SET_COOKIE, HeaderValue::from_static("two=2"));
|
||||
///
|
||||
/// map.retain(|name, val| val.as_bytes().starts_with(b"one"));
|
||||
///
|
||||
/// assert_eq!(map.len(), 1);
|
||||
/// assert!(map.contains_key(&header::SET_COOKIE));
|
||||
/// ```
|
||||
pub fn retain<F>(&mut self, mut retain_fn: F)
|
||||
where
|
||||
F: FnMut(&HeaderName, &mut HeaderValue) -> bool,
|
||||
{
|
||||
self.inner.retain(|name, vals| {
|
||||
vals.inner.retain(|val| retain_fn(name, val));
|
||||
|
||||
// invariant: make sure newly empty value lists are removed
|
||||
!vals.is_empty()
|
||||
})
|
||||
}
|
||||
|
||||
/// Clears the map, returning all name-value sets as an iterator.
|
||||
///
|
||||
/// Header names will only be yielded for the first value in each set. All items that are
|
||||
@ -630,7 +661,7 @@ impl Removed {
|
||||
/// Returns true if iterator contains no elements, without consuming it.
|
||||
///
|
||||
/// If called immediately after [`HeaderMap::insert`] or [`HeaderMap::remove`], it will indicate
|
||||
/// wether any items were actually replaced or removed, respectively.
|
||||
/// whether any items were actually replaced or removed, respectively.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
match self.inner {
|
||||
// size hint lower bound of smallvec is the correct length
|
||||
@ -943,6 +974,55 @@ mod tests {
|
||||
assert!(map.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn retain() {
|
||||
let mut map = HeaderMap::new();
|
||||
|
||||
map.append(header::LOCATION, HeaderValue::from_static("/test"));
|
||||
map.append(header::HOST, HeaderValue::from_static("duck.com"));
|
||||
map.append(header::COOKIE, HeaderValue::from_static("one=1"));
|
||||
map.append(header::COOKIE, HeaderValue::from_static("two=2"));
|
||||
|
||||
assert_eq!(map.len(), 4);
|
||||
|
||||
// by value
|
||||
map.retain(|_, val| !val.as_bytes().contains(&b'/'));
|
||||
assert_eq!(map.len(), 3);
|
||||
|
||||
// by name
|
||||
map.retain(|name, _| name.as_str() != "cookie");
|
||||
assert_eq!(map.len(), 1);
|
||||
|
||||
// keep but mutate value
|
||||
map.retain(|_, val| {
|
||||
*val = HeaderValue::from_static("replaced");
|
||||
true
|
||||
});
|
||||
assert_eq!(map.len(), 1);
|
||||
assert_eq!(map.get("host").unwrap(), "replaced");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn retain_removes_empty_value_lists() {
|
||||
let mut map = HeaderMap::with_capacity(3);
|
||||
|
||||
map.append(header::HOST, HeaderValue::from_static("duck.com"));
|
||||
map.append(header::HOST, HeaderValue::from_static("duck.com"));
|
||||
|
||||
assert_eq!(map.len(), 2);
|
||||
assert_eq!(map.len_keys(), 1);
|
||||
assert_eq!(map.inner.len(), 1);
|
||||
assert_eq!(map.capacity(), 3);
|
||||
|
||||
// remove everything
|
||||
map.retain(|_n, _v| false);
|
||||
|
||||
assert_eq!(map.len(), 0);
|
||||
assert_eq!(map.len_keys(), 0);
|
||||
assert_eq!(map.inner.len(), 0);
|
||||
assert_eq!(map.capacity(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn entries_into_iter() {
|
||||
let mut map = HeaderMap::new();
|
||||
|
@ -1,14 +1,18 @@
|
||||
//! Pre-defined `HeaderName`s, traits for parsing and conversion, and other header utility methods.
|
||||
|
||||
// declaring new header consts will yield this error
|
||||
#![allow(clippy::declare_interior_mutable_const)]
|
||||
|
||||
use percent_encoding::{AsciiSet, CONTROLS};
|
||||
|
||||
// re-export from http except header map related items
|
||||
pub use http::header::{
|
||||
pub use ::http::header::{
|
||||
HeaderName, HeaderValue, InvalidHeaderName, InvalidHeaderValue, ToStrError,
|
||||
};
|
||||
|
||||
// re-export const header names
|
||||
pub use http::header::{
|
||||
// re-export const header names, list is explicit so that any updates to `common` module do not
|
||||
// conflict with this set
|
||||
pub use ::http::header::{
|
||||
ACCEPT, ACCEPT_CHARSET, ACCEPT_ENCODING, ACCEPT_LANGUAGE, ACCEPT_RANGES,
|
||||
ACCESS_CONTROL_ALLOW_CREDENTIALS, ACCESS_CONTROL_ALLOW_HEADERS,
|
||||
ACCESS_CONTROL_ALLOW_METHODS, ACCESS_CONTROL_ALLOW_ORIGIN, ACCESS_CONTROL_EXPOSE_HEADERS,
|
||||
@ -30,22 +34,30 @@ pub use http::header::{
|
||||
use crate::{error::ParseError, HttpMessage};
|
||||
|
||||
mod as_name;
|
||||
mod common;
|
||||
mod into_pair;
|
||||
mod into_value;
|
||||
pub mod map;
|
||||
mod shared;
|
||||
mod utils;
|
||||
|
||||
pub use self::as_name::AsHeaderName;
|
||||
pub use self::into_pair::TryIntoHeaderPair;
|
||||
pub use self::into_value::TryIntoHeaderValue;
|
||||
pub use self::map::HeaderMap;
|
||||
pub use self::shared::{
|
||||
parse_extended_value, q, Charset, ContentEncoding, ExtendedValue, HttpDate, LanguageTag,
|
||||
Quality, QualityItem,
|
||||
pub use self::{
|
||||
as_name::AsHeaderName,
|
||||
into_pair::TryIntoHeaderPair,
|
||||
into_value::TryIntoHeaderValue,
|
||||
map::HeaderMap,
|
||||
shared::{
|
||||
parse_extended_value, q, Charset, ContentEncoding, ExtendedValue, HttpDate,
|
||||
LanguageTag, Quality, QualityItem,
|
||||
},
|
||||
utils::{fmt_comma_delimited, from_comma_delimited, from_one_raw_str, http_percent_encode},
|
||||
};
|
||||
pub use self::utils::{
|
||||
fmt_comma_delimited, from_comma_delimited, from_one_raw_str, http_percent_encode,
|
||||
|
||||
// re-export list is explicit so that any updates to `http` do not conflict with this set
|
||||
pub use self::common::{
|
||||
CACHE_STATUS, CDN_CACHE_CONTROL, CROSS_ORIGIN_EMBEDDER_POLICY, CROSS_ORIGIN_OPENER_POLICY,
|
||||
CROSS_ORIGIN_RESOURCE_POLICY, PERMISSIONS_POLICY, X_FORWARDED_FOR, X_FORWARDED_HOST,
|
||||
X_FORWARDED_PROTO,
|
||||
};
|
||||
|
||||
/// An interface for types that already represent a valid header.
|
||||
|
@ -12,7 +12,7 @@ use crate::header::{Charset, HTTP_VALUE};
|
||||
/// - A character sequence representing the actual value (`value`), separated by single quotes.
|
||||
///
|
||||
/// It is defined in [RFC 5987 §3.2](https://datatracker.ietf.org/doc/html/rfc5987#section-3.2).
|
||||
#[derive(Clone, Debug, PartialEq)]
|
||||
#[derive(Clone, Debug, PartialEq, Eq)]
|
||||
pub struct ExtendedValue {
|
||||
/// The character set that is used to encode the `value` to a string.
|
||||
pub charset: Charset,
|
||||
|
@ -4,8 +4,7 @@ use bytes::BytesMut;
|
||||
use http::header::{HeaderValue, InvalidHeaderValue};
|
||||
|
||||
use crate::{
|
||||
config::DATE_VALUE_LENGTH, error::ParseError, header::TryIntoHeaderValue,
|
||||
helpers::MutWriter,
|
||||
date::DATE_VALUE_LENGTH, error::ParseError, header::TryIntoHeaderValue, helpers::MutWriter,
|
||||
};
|
||||
|
||||
/// A timestamp with HTTP-style formatting and parsing.
|
||||
|
@ -147,7 +147,7 @@ mod tests {
|
||||
|
||||
// copy of encoding from actix-web headers
|
||||
#[allow(clippy::enum_variant_names)] // allow Encoding prefix on EncodingExt
|
||||
#[derive(Clone, PartialEq, Debug)]
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum Encoding {
|
||||
Chunked,
|
||||
Brotli,
|
||||
|
@ -55,7 +55,7 @@ pub trait HttpMessage: Sized {
|
||||
""
|
||||
}
|
||||
|
||||
/// Get content type encoding
|
||||
/// Get content type encoding.
|
||||
///
|
||||
/// UTF-8 is used by default if the request charset is not set.
|
||||
fn encoding(&self) -> Result<&'static Encoding, ContentTypeError> {
|
||||
|
84
actix-http/src/keep_alive.rs
Normal file
@ -0,0 +1,84 @@
|
||||
use std::time::Duration;
|
||||
|
||||
/// Connection keep-alive config.
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum KeepAlive {
|
||||
/// Keep-alive duration.
|
||||
///
|
||||
/// `KeepAlive::Timeout(Duration::ZERO)` is mapped to `KeepAlive::Disabled`.
|
||||
Timeout(Duration),
|
||||
|
||||
/// Rely on OS to shutdown TCP connection.
|
||||
///
|
||||
/// Some defaults can be very long, check your OS documentation.
|
||||
Os,
|
||||
|
||||
/// Keep-alive is disabled.
|
||||
///
|
||||
/// Connections will be closed immediately.
|
||||
Disabled,
|
||||
}
|
||||
|
||||
impl KeepAlive {
|
||||
pub(crate) fn enabled(&self) -> bool {
|
||||
!matches!(self, Self::Disabled)
|
||||
}
|
||||
|
||||
#[allow(unused)] // used with `http2` feature flag
|
||||
pub(crate) fn duration(&self) -> Option<Duration> {
|
||||
match self {
|
||||
KeepAlive::Timeout(dur) => Some(*dur),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Map zero duration to disabled.
|
||||
pub(crate) fn normalize(self) -> KeepAlive {
|
||||
match self {
|
||||
KeepAlive::Timeout(Duration::ZERO) => KeepAlive::Disabled,
|
||||
ka => ka,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for KeepAlive {
|
||||
fn default() -> Self {
|
||||
Self::Timeout(Duration::from_secs(5))
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Duration> for KeepAlive {
|
||||
fn from(dur: Duration) -> Self {
|
||||
KeepAlive::Timeout(dur).normalize()
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Option<Duration>> for KeepAlive {
|
||||
fn from(ka_dur: Option<Duration>) -> Self {
|
||||
match ka_dur {
|
||||
Some(dur) => KeepAlive::from(dur),
|
||||
None => KeepAlive::Disabled,
|
||||
}
|
||||
.normalize()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn from_impls() {
|
||||
let test: KeepAlive = Duration::from_secs(1).into();
|
||||
assert_eq!(test, KeepAlive::Timeout(Duration::from_secs(1)));
|
||||
|
||||
let test: KeepAlive = Duration::from_secs(0).into();
|
||||
assert_eq!(test, KeepAlive::Disabled);
|
||||
|
||||
let test: KeepAlive = Some(Duration::from_secs(0)).into();
|
||||
assert_eq!(test, KeepAlive::Disabled);
|
||||
|
||||
let test: KeepAlive = None.into();
|
||||
assert_eq!(test, KeepAlive::Disabled);
|
||||
}
|
||||
}
|
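Since `KeepAlive` is re-exported at the crate root (see the `lib.rs` change further below), callers can construct it directly or lean on the `From` impls above; a small illustration of the normalization rules:

use std::time::Duration;
use actix_http::KeepAlive;

let ka: KeepAlive = Duration::from_secs(30).into();
assert_eq!(ka, KeepAlive::Timeout(Duration::from_secs(30)));

// zero durations normalize to `Disabled`
let ka: KeepAlive = Duration::ZERO.into();
assert_eq!(ka, KeepAlive::Disabled);

// or defer to the operating system's own TCP keep-alive handling
let ka = KeepAlive::Os;
assert!(ka != KeepAlive::Disabled);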
@ -3,6 +3,7 @@
|
||||
//! ## Crate Features
|
||||
//! | Feature | Functionality |
|
||||
//! | ------------------- | ------------------------------------------- |
|
||||
//! | `http2` | HTTP/2 support via [h2]. |
|
||||
//! | `openssl` | TLS support via [OpenSSL]. |
|
||||
//! | `rustls` | TLS support via [rustls]. |
|
||||
//! | `compress-brotli` | Payload compression support: Brotli. |
|
||||
@ -10,6 +11,7 @@
|
||||
//! | `compress-zstd` | Payload compression support: Zstd. |
|
||||
//! | `trust-dns` | Use [trust-dns] as the client DNS resolver. |
|
||||
//!
|
||||
//! [h2]: https://crates.io/crates/h2
|
||||
//! [OpenSSL]: https://crates.io/crates/openssl
|
||||
//! [rustls]: https://crates.io/crates/rustls
|
||||
//! [trust-dns]: https://crates.io/crates/trust-dns
|
||||
@ -19,13 +21,12 @@
|
||||
#![allow(
|
||||
clippy::type_complexity,
|
||||
clippy::too_many_arguments,
|
||||
clippy::borrow_interior_mutable_const
|
||||
clippy::borrow_interior_mutable_const,
|
||||
clippy::uninlined_format_args
|
||||
)]
|
||||
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
|
||||
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
|
||||
|
||||
#[macro_use]
|
||||
extern crate log;
|
||||
#![cfg_attr(docsrs, feature(doc_cfg))]
|
||||
|
||||
pub use ::http::{uri, uri::Uri};
|
||||
pub use ::http::{Method, StatusCode, Version};
|
||||
@ -33,29 +34,38 @@ pub use ::http::{Method, StatusCode, Version};
|
||||
pub mod body;
|
||||
mod builder;
|
||||
mod config;
|
||||
mod date;
|
||||
#[cfg(feature = "__compress")]
|
||||
pub mod encoding;
|
||||
pub mod error;
|
||||
mod extensions;
|
||||
pub mod h1;
|
||||
#[cfg(feature = "http2")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
|
||||
pub mod h2;
|
||||
pub mod header;
|
||||
mod helpers;
|
||||
mod http_message;
|
||||
mod keep_alive;
|
||||
mod message;
|
||||
#[cfg(test)]
|
||||
mod notify_on_drop;
|
||||
mod payload;
|
||||
mod requests;
|
||||
mod responses;
|
||||
mod service;
|
||||
pub mod test;
|
||||
#[cfg(feature = "ws")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "ws")))]
|
||||
pub mod ws;
|
||||
|
||||
pub use self::builder::HttpServiceBuilder;
|
||||
pub use self::config::{KeepAlive, ServiceConfig};
|
||||
pub use self::config::ServiceConfig;
|
||||
pub use self::error::Error;
|
||||
pub use self::extensions::Extensions;
|
||||
pub use self::header::ContentEncoding;
|
||||
pub use self::http_message::HttpMessage;
|
||||
pub use self::keep_alive::KeepAlive;
|
||||
pub use self::message::ConnectionType;
|
||||
pub use self::message::Message;
|
||||
#[allow(deprecated)]
|
||||
@ -63,6 +73,9 @@ pub use self::payload::{BoxedPayloadStream, Payload, PayloadStream};
|
||||
pub use self::requests::{Request, RequestHead, RequestHeadType};
|
||||
pub use self::responses::{Response, ResponseBuilder, ResponseHead};
|
||||
pub use self::service::HttpService;
|
||||
#[cfg(any(feature = "openssl", feature = "rustls"))]
|
||||
#[cfg_attr(docsrs, doc(cfg(any(feature = "openssl", feature = "rustls"))))]
|
||||
pub use self::service::TlsAcceptorConfig;
|
||||
|
||||
/// A major HTTP protocol version.
|
||||
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
|
||||
|
@ -3,7 +3,7 @@ use std::{cell::RefCell, ops, rc::Rc};
|
||||
use bitflags::bitflags;
|
||||
|
||||
/// Represents various types of connection
|
||||
#[derive(Copy, Clone, PartialEq, Debug)]
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum ConnectionType {
|
||||
/// Close connection after response.
|
||||
Close,
|
||||
|
49
actix-http/src/notify_on_drop.rs
Normal file
@ -0,0 +1,49 @@
|
||||
/// Test module for checking the drop state of certain async tasks that are spawned
|
||||
/// with `actix_rt::spawn`.
|
||||
///
|
||||
/// The target task must explicitly construct a `NotifyOnDrop` guard when it is spawned.
|
||||
use std::cell::RefCell;
|
||||
|
||||
thread_local! {
|
||||
static NOTIFY_DROPPED: RefCell<Option<bool>> = RefCell::new(None);
|
||||
}
|
||||
|
||||
/// Check if the spawned task is dropped.
|
||||
///
|
||||
/// # Panics
|
||||
/// Panics when there was no `NotifyOnDrop` instance on current thread.
|
||||
pub(crate) fn is_dropped() -> bool {
|
||||
NOTIFY_DROPPED.with(|bool| {
|
||||
bool.borrow()
|
||||
.expect("No NotifyOnDrop existed on current thread")
|
||||
})
|
||||
}
|
||||
|
||||
pub(crate) struct NotifyOnDrop;
|
||||
|
||||
impl NotifyOnDrop {
|
||||
/// # Panics
|
||||
/// Panics when constructing multiple instances on any given thread.
|
||||
pub(crate) fn new() -> Self {
|
||||
NOTIFY_DROPPED.with(|bool| {
|
||||
let mut bool = bool.borrow_mut();
|
||||
if bool.is_some() {
|
||||
panic!("NotifyOnDrop existed on current thread");
|
||||
} else {
|
||||
*bool = Some(false);
|
||||
}
|
||||
});
|
||||
|
||||
NotifyOnDrop
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for NotifyOnDrop {
|
||||
fn drop(&mut self) {
|
||||
NOTIFY_DROPPED.with(|bool| {
|
||||
if let Some(b) = bool.borrow_mut().as_mut() {
|
||||
*b = true;
|
||||
}
|
||||
});
|
||||
}
|
||||
}
|
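For orientation, a crate-internal test would construct the guard inside the task whose teardown is being verified, then assert on `is_dropped()` after forcing shutdown; a rough sketch where the shutdown trigger and `run_connection` are assumed, not part of this diff:

// the spawned task owns the guard...
actix_rt::spawn(async {
    let _guard = NotifyOnDrop::new();
    run_connection().await; // hypothetical long-running work
});

// ...the test then triggers shutdown of that task and asserts it was dropped
assert!(is_dropped());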
@ -6,16 +6,30 @@ use std::{
|
||||
|
||||
use bytes::Bytes;
|
||||
use futures_core::Stream;
|
||||
use pin_project_lite::pin_project;
|
||||
|
||||
use crate::error::PayloadError;
|
||||
|
||||
/// A boxed payload stream.
|
||||
pub type BoxedPayloadStream = Pin<Box<dyn Stream<Item = Result<Bytes, PayloadError>>>>;
|
||||
|
||||
#[deprecated(since = "4.0.0", note = "Renamed to `BoxedPayloadStream`.")]
|
||||
#[doc(hidden)]
|
||||
#[deprecated(since = "3.0.0", note = "Renamed to `BoxedPayloadStream`.")]
|
||||
pub type PayloadStream = BoxedPayloadStream;
|
||||
|
||||
pin_project_lite::pin_project! {
|
||||
#[cfg(not(feature = "http2"))]
|
||||
pin_project! {
|
||||
/// A streaming payload.
|
||||
#[project = PayloadProj]
|
||||
pub enum Payload<S = BoxedPayloadStream> {
|
||||
None,
|
||||
H1 { payload: crate::h1::Payload },
|
||||
Stream { #[pin] payload: S },
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "http2")]
|
||||
pin_project! {
|
||||
/// A streaming payload.
|
||||
#[project = PayloadProj]
|
||||
pub enum Payload<S = BoxedPayloadStream> {
|
||||
@ -32,14 +46,16 @@ impl<S> From<crate::h1::Payload> for Payload<S> {
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "http2")]
|
||||
impl<S> From<crate::h2::Payload> for Payload<S> {
|
||||
fn from(payload: crate::h2::Payload) -> Self {
|
||||
Payload::H2 { payload }
|
||||
}
|
||||
}
|
||||
|
||||
impl<S> From<h2::RecvStream> for Payload<S> {
|
||||
fn from(stream: h2::RecvStream) -> Self {
|
||||
#[cfg(feature = "http2")]
|
||||
impl<S> From<::h2::RecvStream> for Payload<S> {
|
||||
fn from(stream: ::h2::RecvStream) -> Self {
|
||||
Payload::H2 {
|
||||
payload: crate::h2::Payload::new(stream),
|
||||
}
|
||||
@ -70,7 +86,10 @@ where
|
||||
match self.project() {
|
||||
PayloadProj::None => Poll::Ready(None),
|
||||
PayloadProj::H1 { payload } => Pin::new(payload).poll_next(cx),
|
||||
|
||||
#[cfg(feature = "http2")]
|
||||
PayloadProj::H2 { payload } => Pin::new(payload).poll_next(cx),
|
||||
|
||||
PayloadProj::Stream { payload } => payload.poll_next(cx),
|
||||
}
|
||||
}
|
||||
@ -78,12 +97,10 @@ where
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use std::panic::{RefUnwindSafe, UnwindSafe};
|
||||
|
||||
use static_assertions::{assert_impl_all, assert_not_impl_any};
|
||||
|
||||
use super::*;
|
||||
|
||||
assert_impl_all!(Payload: Unpin);
|
||||
assert_not_impl_any!(Payload: Send, Sync, UnwindSafe, RefUnwindSafe);
|
||||
assert_not_impl_any!(Payload: Send, Sync);
|
||||
}
|
||||
|
@ -130,8 +130,8 @@ impl RequestHead {
|
||||
}
|
||||
}
|
||||
|
||||
/// Request contains `EXPECT` header.
|
||||
#[inline]
|
||||
/// Request contains `EXPECT` header
|
||||
pub fn expect(&self) -> bool {
|
||||
self.flags.contains(Flags::EXPECT)
|
||||
}
|
||||
|
@ -113,14 +113,14 @@ impl<P> Request<P> {
|
||||
#[inline]
|
||||
/// Http message part of the request
|
||||
pub fn head(&self) -> &RequestHead {
|
||||
&*self.head
|
||||
&self.head
|
||||
}
|
||||
|
||||
#[inline]
|
||||
#[doc(hidden)]
|
||||
/// Mutable reference to a HTTP message part of the request
|
||||
pub fn head_mut(&mut self) -> &mut RequestHead {
|
||||
&mut *self.head
|
||||
&mut self.head
|
||||
}
|
||||
|
||||
/// Mutable reference to the message's headers.
|
||||
|
@ -144,7 +144,7 @@ impl ResponseBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Set connection type to Upgrade
|
||||
/// Set connection type to `Upgrade`.
|
||||
#[inline]
|
||||
pub fn upgrade<V>(&mut self, value: V) -> &mut Self
|
||||
where
|
||||
@ -161,7 +161,7 @@ impl ResponseBuilder {
|
||||
self
|
||||
}
|
||||
|
||||
/// Force close connection, even if it is marked as keep-alive
|
||||
/// Force-close connection, even if it is marked as keep-alive.
|
||||
#[inline]
|
||||
pub fn force_close(&mut self) -> &mut Self {
|
||||
if let Some(parts) = self.inner() {
|
||||
|
@ -42,7 +42,7 @@ impl ResponseHead {
|
||||
&mut self.headers
|
||||
}
|
||||
|
||||
/// Sets the flag that controls wether to send headers formatted as Camel-Case.
|
||||
/// Sets the flag that controls whether to send headers formatted as Camel-Case.
|
||||
///
|
||||
/// Only applicable to HTTP/1.x responses; HTTP/2 header names are always lowercase.
|
||||
#[inline]
|
||||
@ -210,14 +210,15 @@ mod tests {
|
||||
use memchr::memmem;
|
||||
|
||||
use crate::{
|
||||
h1::H1Service,
|
||||
header::{HeaderName, HeaderValue},
|
||||
Error, HttpService, Request, Response,
|
||||
Error, Request, Response, ServiceConfig,
|
||||
};
|
||||
|
||||
#[actix_rt::test]
|
||||
async fn camel_case_headers() {
|
||||
let mut srv = actix_http_test::test_server(|| {
|
||||
HttpService::new(|req: Request| async move {
|
||||
H1Service::with_config(ServiceConfig::default(), |req: Request| async move {
|
||||
let mut res = Response::ok();
|
||||
|
||||
if req.path().contains("camel") {
|
||||
@ -228,6 +229,7 @@ mod tests {
|
||||
HeaderName::from_static("foo-bar"),
|
||||
HeaderValue::from_static("baz"),
|
||||
);
|
||||
|
||||
Ok::<_, Error>(res)
|
||||
})
|
||||
.tcp()
|
||||
@ -235,9 +237,11 @@ mod tests {
|
||||
.await;
|
||||
|
||||
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
|
||||
let _ = stream.write_all(b"GET /camel HTTP/1.1\r\nConnection: Close\r\n\r\n");
|
||||
let mut data = vec![0; 1024];
|
||||
let _ = stream.read(&mut data);
|
||||
stream
|
||||
.write_all(b"GET /camel HTTP/1.1\r\nConnection: Close\r\n\r\n")
|
||||
.unwrap();
|
||||
let mut data = vec![];
|
||||
let _ = stream.read_to_end(&mut data).unwrap();
|
||||
assert_eq!(&data[..17], b"HTTP/1.1 200 OK\r\n");
|
||||
assert!(memmem::find(&data, b"Foo-Bar").is_some());
|
||||
assert!(memmem::find(&data, b"foo-bar").is_none());
|
||||
@ -247,9 +251,11 @@ mod tests {
|
||||
assert!(memmem::find(&data, b"content-length").is_none());
|
||||
|
||||
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
|
||||
let _ = stream.write_all(b"GET /lower HTTP/1.1\r\nConnection: Close\r\n\r\n");
|
||||
let mut data = vec![0; 1024];
|
||||
let _ = stream.read(&mut data);
|
||||
stream
|
||||
.write_all(b"GET /lower HTTP/1.1\r\nConnection: Close\r\n\r\n")
|
||||
.unwrap();
|
||||
let mut data = vec![];
|
||||
let _ = stream.read_to_end(&mut data).unwrap();
|
||||
assert_eq!(&data[..17], b"HTTP/1.1 200 OK\r\n");
|
||||
assert!(memmem::find(&data, b"Foo-Bar").is_none());
|
||||
assert!(memmem::find(&data, b"foo-bar").is_some());
|
||||
|
@ -83,13 +83,13 @@ impl<B> Response<B> {
|
||||
/// Returns a reference to the head of this response.
|
||||
#[inline]
|
||||
pub fn head(&self) -> &ResponseHead {
|
||||
&*self.head
|
||||
&self.head
|
||||
}
|
||||
|
||||
/// Returns a mutable reference to the head of this response.
|
||||
#[inline]
|
||||
pub fn head_mut(&mut self) -> &mut ResponseHead {
|
||||
&mut *self.head
|
||||
&mut self.head
|
||||
}
|
||||
|
||||
/// Returns the status code of this response.
|
||||
@ -185,7 +185,7 @@ impl<B> Response<B> {
|
||||
self.replace_body(())
|
||||
}
|
||||
|
||||
/// Map the current body type to another using a closure. Returns a new response.
|
||||
/// Map the current body type to another using a closure, returning a new response.
|
||||
///
|
||||
/// Closure receives the response head and the current body type.
|
||||
#[inline]
|
||||
@ -202,6 +202,7 @@ impl<B> Response<B> {
|
||||
}
|
||||
}
|
||||
|
||||
/// Map the current body to a type-erased `BoxBody`.
|
||||
#[inline]
|
||||
pub fn map_into_boxed_body(self) -> Response<BoxBody>
|
||||
where
|
||||
@ -210,7 +211,7 @@ impl<B> Response<B> {
|
||||
self.map_body(|_, body| body.boxed())
|
||||
}
|
||||
|
||||
/// Returns body, consuming this response.
|
||||
/// Returns the response body, dropping all other parts.
|
||||
#[inline]
|
||||
pub fn into_body(self) -> B {
|
||||
self.body
|
||||
@ -284,6 +285,24 @@ impl From<&'static [u8]> for Response<&'static [u8]> {
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Vec<u8>> for Response<Vec<u8>> {
|
||||
fn from(val: Vec<u8>) -> Self {
|
||||
let mut res = Response::with_body(StatusCode::OK, val);
|
||||
let mime = mime::APPLICATION_OCTET_STREAM.try_into_value().unwrap();
|
||||
res.headers_mut().insert(header::CONTENT_TYPE, mime);
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl From<&Vec<u8>> for Response<Vec<u8>> {
|
||||
fn from(val: &Vec<u8>) -> Self {
|
||||
let mut res = Response::with_body(StatusCode::OK, val.clone());
|
||||
let mime = mime::APPLICATION_OCTET_STREAM.try_into_value().unwrap();
|
||||
res.headers_mut().insert(header::CONTENT_TYPE, mime);
|
||||
res
|
||||
}
|
||||
}
|
||||
|
||||
impl From<String> for Response<String> {
|
||||
fn from(val: String) -> Self {
|
||||
let mut res = Response::with_body(StatusCode::OK, val);
|
||||
|
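The new `From` impls make byte-vector responses one-liners; for instance (mirroring the content type set by the impl above):

use actix_http::{header, Response, StatusCode};

let res: Response<Vec<u8>> = vec![0x01, 0x02, 0x03].into();

assert_eq!(res.status(), StatusCode::OK);
assert_eq!(
    res.headers().get(header::CONTENT_TYPE).unwrap(),
    "application/octet-stream"
);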
@ -15,16 +15,48 @@ use actix_service::{
|
||||
};
|
||||
use futures_core::{future::LocalBoxFuture, ready};
|
||||
use pin_project_lite::pin_project;
|
||||
use tracing::error;
|
||||
|
||||
use crate::{
|
||||
body::{BoxBody, MessageBody},
|
||||
builder::HttpServiceBuilder,
|
||||
config::{KeepAlive, ServiceConfig},
|
||||
error::DispatchError,
|
||||
h1, h2, ConnectCallback, OnConnectData, Protocol, Request, Response,
|
||||
h1, ConnectCallback, OnConnectData, Protocol, Request, Response, ServiceConfig,
|
||||
};
|
||||
|
||||
/// A `ServiceFactory` for HTTP/1.1 or HTTP/2 protocol.
|
||||
/// A [`ServiceFactory`] for HTTP/1.1 and HTTP/2 connections.
|
||||
///
|
||||
/// Use [`build`](Self::build) to begin constructing service. Also see [`HttpServiceBuilder`].
|
||||
///
|
||||
/// # Automatic HTTP Version Selection
|
||||
/// There are two ways to select the HTTP version of an incoming connection:
|
||||
/// - One is to rely on the ALPN information that is provided when using a TLS (HTTPS); both
|
||||
/// versions are supported automatically when using either of the `.rustls()` or `.openssl()`
|
||||
/// finalizing methods.
|
||||
/// - The other is to read the first few bytes of the TCP stream. This is the only viable approach
|
||||
/// for supporting H2C, which allows the HTTP/2 protocol to work over plaintext connections. Use
|
||||
/// the `.tcp_auto_h2c()` finalizing method to enable this behavior.
|
||||
///
|
||||
/// # Examples
|
||||
/// ```
|
||||
/// # use std::convert::Infallible;
|
||||
/// use actix_http::{HttpService, Request, Response, StatusCode};
|
||||
///
|
||||
/// // this service would be constructed in an actix_server::Server
|
||||
///
|
||||
/// # actix_rt::System::new().block_on(async {
|
||||
/// HttpService::build()
|
||||
/// // the builder finalizing method, other finalizers would not return an `HttpService`
|
||||
/// .finish(|_req: Request| async move {
|
||||
/// Ok::<_, Infallible>(
|
||||
/// Response::build(StatusCode::OK).body("Hello!")
|
||||
/// )
|
||||
/// })
|
||||
/// // the service finalizing method
|
||||
/// // you can use `.tcp_auto_h2c()`, `.rustls()`, or `.openssl()` instead of `.tcp()`
|
||||
/// .tcp();
|
||||
/// # })
|
||||
/// ```
|
||||
pub struct HttpService<T, S, B, X = h1::ExpectHandler, U = h1::UpgradeHandler> {
|
||||
srv: S,
|
||||
cfg: ServiceConfig,
|
||||
@ -43,9 +75,9 @@ where
|
||||
<S::Service as Service<Request>>::Future: 'static,
|
||||
B: MessageBody + 'static,
|
||||
{
|
||||
/// Create builder for `HttpService` instance.
|
||||
/// Constructs builder for `HttpService` instance.
|
||||
pub fn build() -> HttpServiceBuilder<T, S> {
|
||||
HttpServiceBuilder::new()
|
||||
HttpServiceBuilder::default()
|
||||
}
|
||||
}
|
||||
|
||||
@ -58,12 +90,10 @@ where
|
||||
<S::Service as Service<Request>>::Future: 'static,
|
||||
B: MessageBody + 'static,
|
||||
{
|
||||
/// Create new `HttpService` instance.
|
||||
/// Constructs new `HttpService` instance from service with default config.
|
||||
pub fn new<F: IntoServiceFactory<S, Request>>(service: F) -> Self {
|
||||
let cfg = ServiceConfig::new(KeepAlive::Timeout(5), 5000, 0, false, None);
|
||||
|
||||
HttpService {
|
||||
cfg,
|
||||
cfg: ServiceConfig::default(),
|
||||
srv: service.into_factory(),
|
||||
expect: h1::ExpectHandler,
|
||||
upgrade: None,
|
||||
@ -72,7 +102,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Create new `HttpService` instance with config.
|
||||
/// Constructs new `HttpService` instance from config and service.
|
||||
pub(crate) fn with_config<F: IntoServiceFactory<S, Request>>(
|
||||
cfg: ServiceConfig,
|
||||
service: F,
|
||||
@ -97,11 +127,10 @@ where
|
||||
<S::Service as Service<Request>>::Future: 'static,
|
||||
B: MessageBody,
|
||||
{
|
||||
/// Provide service for `EXPECT: 100-Continue` support.
|
||||
/// Sets service for `Expect: 100-Continue` handling.
|
||||
///
|
||||
/// Service get called with request that contains `EXPECT` header.
|
||||
/// Service must return request in case of success, in that case
|
||||
/// request will be forwarded to main service.
|
||||
/// An expect service is called with requests that contain an `Expect` header. A successful
|
||||
/// response type is also a request which will be forwarded to the main service.
|
||||
pub fn expect<X1>(self, expect: X1) -> HttpService<T, S, B, X1, U>
|
||||
where
|
||||
X1: ServiceFactory<Request, Config = (), Response = Request>,
|
||||
@ -118,10 +147,10 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
/// Provide service for custom `Connection: UPGRADE` support.
|
||||
/// Sets service for custom `Connection: Upgrade` handling.
|
||||
///
|
||||
/// If service is provided then normal requests handling get halted
|
||||
/// and this service get called with original request and framed object.
|
||||
/// If a service is provided, normal request handling is halted and this service is called
|
||||
/// with the original request and framed object.
|
||||
pub fn upgrade<U1>(self, upgrade: Option<U1>) -> HttpService<T, S, B, X, U1>
|
||||
where
|
||||
U1: ServiceFactory<(Request, Framed<T, h1::Codec>), Config = (), Response = ()>,
|
||||
@ -166,7 +195,9 @@ where
|
||||
U::Error: fmt::Display + Into<Response<BoxBody>>,
|
||||
U::InitError: fmt::Debug,
|
||||
{
|
||||
/// Create simple tcp stream service
|
||||
/// Creates TCP stream service from HTTP service.
|
||||
///
|
||||
/// The resulting service only supports HTTP/1.x.
|
||||
pub fn tcp(
|
||||
self,
|
||||
) -> impl ServiceFactory<
|
||||
@ -182,6 +213,61 @@ where
|
||||
})
|
||||
.and_then(self)
|
||||
}
|
||||
|
||||
/// Creates TCP stream service from HTTP service that automatically selects HTTP/1.x or HTTP/2
|
||||
/// on plaintext connections.
|
||||
#[cfg(feature = "http2")]
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "http2")))]
|
||||
pub fn tcp_auto_h2c(
|
||||
self,
|
||||
) -> impl ServiceFactory<
|
||||
TcpStream,
|
||||
Config = (),
|
||||
Response = (),
|
||||
Error = DispatchError,
|
||||
InitError = (),
|
||||
> {
|
||||
fn_service(move |io: TcpStream| async move {
|
||||
// subset of HTTP/2 preface defined by RFC 9113 §3.4
|
||||
// this subset was chosen to maximize likelihood that peeking only once will allow us to
|
||||
// reliably determine the version, or else fall back to HTTP/1 and fail quickly if data
|
||||
// on the wire is junk
|
||||
const H2_PREFACE: &[u8] = b"PRI * HTTP/2";
|
||||
|
||||
let mut buf = [0; 12];
|
||||
|
||||
io.peek(&mut buf).await?;
|
||||
|
||||
let proto = if buf == H2_PREFACE {
|
||||
Protocol::Http2
|
||||
} else {
|
||||
Protocol::Http1
|
||||
};
|
||||
|
||||
let peer_addr = io.peer_addr().ok();
|
||||
Ok((io, proto, peer_addr))
|
||||
})
|
||||
.and_then(self)
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration options used when accepting TLS connection.
|
||||
#[cfg(any(feature = "openssl", feature = "rustls"))]
|
||||
#[cfg_attr(docsrs, doc(cfg(any(feature = "openssl", feature = "rustls"))))]
|
||||
#[derive(Debug, Default)]
|
||||
pub struct TlsAcceptorConfig {
|
||||
pub(crate) handshake_timeout: Option<std::time::Duration>,
|
||||
}
|
||||
|
||||
#[cfg(any(feature = "openssl", feature = "rustls"))]
|
||||
impl TlsAcceptorConfig {
|
||||
/// Set TLS handshake timeout duration.
|
||||
pub fn handshake_timeout(self, dur: std::time::Duration) -> Self {
|
||||
Self {
|
||||
handshake_timeout: Some(dur),
|
||||
// ..self
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "openssl")]
|
||||
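A sketch of how this config plugs into the `*_with_config` finalizers introduced later in this diff; `load_rustls_config` is a hypothetical helper that builds a rustls `ServerConfig` (certificates and key), which is out of scope here:

use std::{convert::Infallible, time::Duration};
use actix_http::{HttpService, Request, Response, StatusCode, TlsAcceptorConfig};

let tls_config = load_rustls_config();

let service = HttpService::build()
    .finish(|_req: Request| async {
        Ok::<_, Infallible>(Response::build(StatusCode::OK).body("Hello!"))
    })
    .rustls_with_config(
        tls_config,
        TlsAcceptorConfig::default().handshake_timeout(Duration::from_secs(3)),
    );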
@ -223,6 +309,7 @@ mod openssl {
|
||||
U::InitError: fmt::Debug,
|
||||
{
|
||||
/// Create OpenSSL based service.
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "openssl")))]
|
||||
pub fn openssl(
|
||||
self,
|
||||
acceptor: SslAcceptor,
|
||||
@ -233,7 +320,29 @@ mod openssl {
|
||||
Error = TlsError<SslError, DispatchError>,
|
||||
InitError = (),
|
||||
> {
|
||||
Acceptor::new(acceptor)
|
||||
self.openssl_with_config(acceptor, TlsAcceptorConfig::default())
|
||||
}
|
||||
|
||||
/// Create OpenSSL based service with custom TLS acceptor configuration.
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "openssl")))]
|
||||
pub fn openssl_with_config(
|
||||
self,
|
||||
acceptor: SslAcceptor,
|
||||
tls_acceptor_config: TlsAcceptorConfig,
|
||||
) -> impl ServiceFactory<
|
||||
TcpStream,
|
||||
Config = (),
|
||||
Response = (),
|
||||
Error = TlsError<SslError, DispatchError>,
|
||||
InitError = (),
|
||||
> {
|
||||
let mut acceptor = Acceptor::new(acceptor);
|
||||
|
||||
if let Some(handshake_timeout) = tls_acceptor_config.handshake_timeout {
|
||||
acceptor.set_handshake_timeout(handshake_timeout);
|
||||
}
|
||||
|
||||
acceptor
|
||||
.map_init_err(|_| {
|
||||
unreachable!("TLS acceptor service factory does not error on init")
|
||||
})
|
||||
@ -295,9 +404,26 @@ mod rustls {
|
||||
U::InitError: fmt::Debug,
|
||||
{
|
||||
/// Create Rustls based service.
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "rustls")))]
|
||||
pub fn rustls(
|
||||
self,
|
||||
config: ServerConfig,
|
||||
) -> impl ServiceFactory<
|
||||
TcpStream,
|
||||
Config = (),
|
||||
Response = (),
|
||||
Error = TlsError<io::Error, DispatchError>,
|
||||
InitError = (),
|
||||
> {
|
||||
self.rustls_with_config(config, TlsAcceptorConfig::default())
|
||||
}
|
||||
|
||||
/// Create Rustls based service with custom TLS acceptor configuration.
|
||||
#[cfg_attr(docsrs, doc(cfg(feature = "rustls")))]
|
||||
pub fn rustls_with_config(
|
||||
self,
|
||||
mut config: ServerConfig,
|
||||
tls_acceptor_config: TlsAcceptorConfig,
|
||||
) -> impl ServiceFactory<
|
||||
TcpStream,
|
||||
Config = (),
|
||||
@ -309,7 +435,13 @@ mod rustls {
|
||||
protos.extend_from_slice(&config.alpn_protocols);
|
||||
config.alpn_protocols = protos;
|
||||
|
||||
Acceptor::new(config)
|
||||
let mut acceptor = Acceptor::new(config);
|
||||
|
||||
if let Some(handshake_timeout) = tls_acceptor_config.handshake_timeout {
|
||||
acceptor.set_handshake_timeout(handshake_timeout);
|
||||
}
|
||||
|
||||
acceptor
|
||||
.map_init_err(|_| {
|
||||
unreachable!("TLS acceptor service factory does not error on init")
|
||||
})
|
||||
@ -373,13 +505,13 @@ where
|
||||
Box::pin(async move {
|
||||
let expect = expect
|
||||
.await
|
||||
.map_err(|e| log::error!("Init http expect service error: {:?}", e))?;
|
||||
.map_err(|e| error!("Init http expect service error: {:?}", e))?;
|
||||
|
||||
let upgrade = match upgrade {
|
||||
Some(upgrade) => {
|
||||
let upgrade = upgrade
|
||||
.await
|
||||
.map_err(|e| log::error!("Init http upgrade service error: {:?}", e))?;
|
||||
.map_err(|e| error!("Init http upgrade service error: {:?}", e))?;
|
||||
Some(upgrade)
|
||||
}
|
||||
None => None,
|
||||
@ -387,7 +519,7 @@ where
|
||||
|
||||
let service = service
|
||||
.await
|
||||
.map_err(|e| log::error!("Init http service error: {:?}", e))?;
|
||||
.map_err(|e| error!("Init http service error: {:?}", e))?;
|
||||
|
||||
Ok(HttpServiceHandler::new(
|
||||
cfg,
|
||||
@ -494,7 +626,7 @@ where
|
||||
|
||||
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
|
||||
self._poll_ready(cx).map_err(|err| {
|
||||
log::error!("HTTP service readiness error: {:?}", err);
|
||||
error!("HTTP service readiness error: {:?}", err);
|
||||
DispatchError::Service(err)
|
||||
})
|
||||
}
|
||||
@ -506,10 +638,11 @@ where
|
||||
let conn_data = OnConnectData::from_io(&io, self.on_connect_ext.as_deref());
|
||||
|
||||
match proto {
|
||||
#[cfg(feature = "http2")]
|
||||
Protocol::Http2 => HttpServiceHandlerResponse {
|
||||
state: State::H2Handshake {
|
||||
handshake: Some((
|
||||
h2::handshake_with_timeout(io, &self.cfg),
|
||||
crate::h2::handshake_with_timeout(io, &self.cfg),
|
||||
self.cfg.clone(),
|
||||
self.flow.clone(),
|
||||
conn_data,
|
||||
@ -518,6 +651,11 @@ where
|
||||
},
|
||||
},
|
||||
|
||||
#[cfg(not(feature = "http2"))]
|
||||
Protocol::Http2 => {
|
||||
panic!("HTTP/2 support is disabled (enable with the `http2` feature flag)")
|
||||
}
|
||||
|
||||
Protocol::Http1 => HttpServiceHandlerResponse {
|
||||
state: State::H1 {
|
||||
dispatcher: h1::Dispatcher::new(
|
||||
@ -535,6 +673,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(not(feature = "http2"))]
|
||||
pin_project! {
|
||||
#[project = StateProj]
|
||||
enum State<T, S, B, X, U>
|
||||
@@ -556,10 +695,37 @@ pin_project! {
        U::Error: fmt::Display,
    {
        H1 { #[pin] dispatcher: h1::Dispatcher<T, S, B, X, U> },
        H2 { #[pin] dispatcher: h2::Dispatcher<T, S, B, X, U> },
    }
}

#[cfg(feature = "http2")]
pin_project! {
    #[project = StateProj]
    enum State<T, S, B, X, U>
    where
        T: AsyncRead,
        T: AsyncWrite,
        T: Unpin,

        S: Service<Request>,
        S::Future: 'static,
        S::Error: Into<Response<BoxBody>>,

        B: MessageBody,

        X: Service<Request, Response = Request>,
        X::Error: Into<Response<BoxBody>>,

        U: Service<(Request, Framed<T, h1::Codec>), Response = ()>,
        U::Error: fmt::Display,
    {
        H1 { #[pin] dispatcher: h1::Dispatcher<T, S, B, X, U> },

        H2 { #[pin] dispatcher: crate::h2::Dispatcher<T, S, B, X, U> },

        H2Handshake {
            handshake: Option<(
                h2::HandshakeWithTimeout<T>,
                crate::h2::HandshakeWithTimeout<T>,
                ServiceConfig,
                Rc<HttpFlow<S, X, U>>,
                OnConnectData,
@@ -618,21 +784,25 @@ where
    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        match self.as_mut().project().state.project() {
            StateProj::H1 { dispatcher } => dispatcher.poll(cx),

            #[cfg(feature = "http2")]
            StateProj::H2 { dispatcher } => dispatcher.poll(cx),

            #[cfg(feature = "http2")]
            StateProj::H2Handshake { handshake: data } => {
                match ready!(Pin::new(&mut data.as_mut().unwrap().0).poll(cx)) {
                    Ok((conn, timer)) => {
                        let (_, config, flow, conn_data, peer_addr) = data.take().unwrap();

                        self.as_mut().project().state.set(State::H2 {
                            dispatcher: h2::Dispatcher::new(
                            dispatcher: crate::h2::Dispatcher::new(
                                conn, flow, config, peer_addr, conn_data, timer,
                            ),
                        });
                        self.poll(cx)
                    }
                    Err(err) => {
                        trace!("H2 handshake error: {}", err);
                        tracing::trace!("H2 handshake error: {}", err);
                        Poll::Ready(Err(err))
                    }
                }
@@ -1,7 +1,7 @@
//! Various testing helpers for use in internal and app tests.

use std::{
    cell::{Ref, RefCell},
    cell::{Ref, RefCell, RefMut},
    io::{self, Read, Write},
    pin::Pin,
    rc::Rc,
@@ -19,29 +19,7 @@ use crate::{
    Request,
};

/// Test `Request` builder
///
/// ```ignore
/// # use http::{header, StatusCode};
/// # use actix_web::*;
/// use actix_web::test::TestRequest;
///
/// fn index(req: &HttpRequest) -> Response {
///     if let Some(hdr) = req.headers().get(header::CONTENT_TYPE) {
///         Response::Ok().into()
///     } else {
///         Response::BadRequest().into()
///     }
/// }
///
/// let resp = TestRequest::default().insert_header("content-type", "text/plain")
///     .run(&index)
///     .unwrap();
/// assert_eq!(resp.status(), StatusCode::OK);
///
/// let resp = TestRequest::default().run(&index).unwrap();
/// assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
/// ```
/// Test `Request` builder.
pub struct TestRequest(Option<Inner>);
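Since the stale doctest above is dropped rather than updated, here is a rough sketch of how this builder is typically driven today. It assumes the current `actix_http::test::TestRequest` surface (`uri`, `insert_header`, `finish`); verify the exact method set against the released API before relying on it:

```rust
use actix_http::{header, test::TestRequest, Request};

#[test]
fn build_request_for_service_under_test() {
    // Construct a bare `Request` that can then be passed to a service's `call`.
    let req: Request = TestRequest::default()
        .uri("/index.html")
        .insert_header((header::CONTENT_TYPE, "text/plain"))
        .finish();

    assert_eq!(req.path(), "/index.html");
}
```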
struct Inner {
@@ -157,10 +135,11 @@ fn parts(parts: &mut Option<Inner>) -> &mut Inner {
}

/// Async I/O test buffer.
#[derive(Debug)]
pub struct TestBuffer {
    pub read_buf: BytesMut,
    pub write_buf: BytesMut,
    pub err: Option<io::Error>,
    pub read_buf: Rc<RefCell<BytesMut>>,
    pub write_buf: Rc<RefCell<BytesMut>>,
    pub err: Option<Rc<io::Error>>,
}

impl TestBuffer {
@@ -170,34 +149,69 @@ impl TestBuffer {
        T: Into<BytesMut>,
    {
        Self {
            read_buf: data.into(),
            write_buf: BytesMut::new(),
            read_buf: Rc::new(RefCell::new(data.into())),
            write_buf: Rc::new(RefCell::new(BytesMut::new())),
            err: None,
        }
    }

    // intentionally not using Clone trait
    #[allow(dead_code)]
    pub(crate) fn clone(&self) -> Self {
        Self {
            read_buf: self.read_buf.clone(),
            write_buf: self.write_buf.clone(),
            err: self.err.clone(),
        }
    }

    /// Create new empty `TestBuffer` instance.
    pub fn empty() -> Self {
        Self::new("")
    }

    #[allow(dead_code)]
    pub(crate) fn read_buf_slice(&self) -> Ref<'_, [u8]> {
        Ref::map(self.read_buf.borrow(), |b| b.as_ref())
    }

    #[allow(dead_code)]
    pub(crate) fn read_buf_slice_mut(&self) -> RefMut<'_, [u8]> {
        RefMut::map(self.read_buf.borrow_mut(), |b| b.as_mut())
    }

    #[allow(dead_code)]
    pub(crate) fn write_buf_slice(&self) -> Ref<'_, [u8]> {
        Ref::map(self.write_buf.borrow(), |b| b.as_ref())
    }

    #[allow(dead_code)]
    pub(crate) fn write_buf_slice_mut(&self) -> RefMut<'_, [u8]> {
        RefMut::map(self.write_buf.borrow_mut(), |b| b.as_mut())
    }

    #[allow(dead_code)]
    pub(crate) fn take_write_buf(&self) -> Bytes {
        self.write_buf.borrow_mut().split().freeze()
    }

    /// Add data to read buffer.
    pub fn extend_read_buf<T: AsRef<[u8]>>(&mut self, data: T) {
        self.read_buf.extend_from_slice(data.as_ref())
        self.read_buf.borrow_mut().extend_from_slice(data.as_ref())
    }
}
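The switch to `Rc<RefCell<BytesMut>>` is what lets the crate-private `clone` above hand a second `TestBuffer` handle to the code under test while the test itself keeps inspecting the same underlying buffers. A standalone sketch of that sharing pattern, using only the `bytes` crate rather than the actix types:

```rust
use std::{cell::RefCell, rc::Rc};

use bytes::BytesMut;

fn main() {
    // Two handles that share one write buffer, mirroring the TestBuffer design.
    let write_buf = Rc::new(RefCell::new(BytesMut::new()));
    let io_side = Rc::clone(&write_buf);

    // The "I/O" side appends data...
    io_side.borrow_mut().extend_from_slice(b"HTTP/1.1 200 OK\r\n");

    // ...and the test side observes it through its own handle.
    assert_eq!(&write_buf.borrow()[..], b"HTTP/1.1 200 OK\r\n");
}
```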
impl io::Read for TestBuffer {
    fn read(&mut self, dst: &mut [u8]) -> Result<usize, io::Error> {
        if self.read_buf.is_empty() {
        if self.read_buf.borrow().is_empty() {
            if self.err.is_some() {
                Err(self.err.take().unwrap())
                Err(Rc::try_unwrap(self.err.take().unwrap()).unwrap())
            } else {
                Err(io::Error::new(io::ErrorKind::WouldBlock, ""))
            }
        } else {
            let size = std::cmp::min(self.read_buf.len(), dst.len());
            let b = self.read_buf.split_to(size);
            let size = std::cmp::min(self.read_buf.borrow().len(), dst.len());
            let b = self.read_buf.borrow_mut().split_to(size);
            dst[..size].copy_from_slice(&b);
            Ok(size)
        }
@@ -206,7 +220,7 @@ impl io::Read for TestBuffer {

impl io::Write for TestBuffer {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.write_buf.extend(buf);
        self.write_buf.borrow_mut().extend(buf);
        Ok(buf.len())
    }
@@ -1,14 +1,17 @@
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags;
use bytes::{Bytes, BytesMut};
use bytestring::ByteString;
use tokio_util::codec::{Decoder, Encoder};
use tracing::error;

use super::frame::Parser;
use super::proto::{CloseReason, OpCode};
use super::ProtocolError;
use super::{
    frame::Parser,
    proto::{CloseReason, OpCode},
    ProtocolError,
};

/// A WebSocket message.
#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Eq)]
pub enum Message {
    /// Text message.
    Text(ByteString),
@@ -33,7 +36,7 @@
}

/// A WebSocket frame.
#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Eq)]
pub enum Frame {
    /// Text frame. Note that the codec does not validate UTF-8 encoding.
    Text(Bytes),
@@ -55,7 +58,7 @@
}

/// A WebSocket continuation item.
#[derive(Debug, PartialEq)]
#[derive(Debug, PartialEq, Eq)]
pub enum Item {
    FirstText(Bytes),
    FirstBinary(Bytes),
@@ -1,6 +1,8 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
};

use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_service::{IntoService, Service};
@@ -71,10 +73,12 @@ mod inner {
    use actix_service::{IntoService, Service};
    use futures_core::stream::Stream;
    use local_channel::mpsc;
    use log::debug;
    use pin_project_lite::pin_project;
    use tracing::debug;

    use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed};
    use actix_codec::Framed;
    use tokio::io::{AsyncRead, AsyncWrite};
    use tokio_util::codec::{Decoder, Encoder};

    use crate::{body::BoxBody, Response};
@@ -1,11 +1,13 @@
use std::convert::TryFrom;

use bytes::{Buf, BufMut, BytesMut};
use log::debug;
use tracing::debug;

use crate::ws::mask::apply_mask;
use crate::ws::proto::{CloseCode, CloseReason, OpCode};
use crate::ws::ProtocolError;
use super::{
    mask::apply_mask,
    proto::{CloseCode, CloseReason, OpCode},
    ProtocolError,
};

/// A struct representing a WebSocket frame.
#[derive(Debug)]
@@ -15,7 +17,6 @@ impl Parser {
    fn parse_metadata(
        src: &[u8],
        server: bool,
        max_size: usize,
    ) -> Result<Option<(usize, bool, OpCode, usize, Option<[u8; 4]>)>, ProtocolError> {
        let chunk_len = src.len();
@@ -58,20 +59,12 @@
                return Ok(None);
            }
            let len = u64::from_be_bytes(TryFrom::try_from(&src[idx..idx + 8]).unwrap());
            if len > max_size as u64 {
                return Err(ProtocolError::Overflow);
            }
            idx += 8;
            len as usize
        } else {
            len as usize
        };

        // check for max allowed size
        if length > max_size {
            return Err(ProtocolError::Overflow);
        }

        let mask = if server {
            if chunk_len < idx + 4 {
                return Ok(None);
@@ -96,11 +89,10 @@
        max_size: usize,
    ) -> Result<Option<(bool, OpCode, Option<BytesMut>)>, ProtocolError> {
        // try to parse ws frame metadata
        let (idx, finished, opcode, length, mask) =
            match Parser::parse_metadata(src, server, max_size)? {
                None => return Ok(None),
                Some(res) => res,
            };
        let (idx, finished, opcode, length, mask) = match Parser::parse_metadata(src, server)? {
            None => return Ok(None),
            Some(res) => res,
        };

        // not enough data
        if src.len() < idx + length {
@@ -110,6 +102,13 @@
        // remove prefix
        src.advance(idx);

        // check for max allowed size
        if length > max_size {
            // drop the payload
            src.advance(length);
            return Err(ProtocolError::Overflow);
        }

        // no need for body
        if length == 0 {
            return Ok(Some((finished, opcode, None)));
@@ -314,7 +313,7 @@ mod tests {
    #[test]
    fn test_parse_frame_no_mask() {
        let mut buf = BytesMut::from(&[0b0000_0001u8, 0b0000_0001u8][..]);
        buf.extend(&[1u8]);
        buf.extend([1u8]);

        assert!(Parser::parse(&mut buf, true, 1024).is_err());

@@ -327,7 +326,7 @@
    #[test]
    fn test_parse_frame_max_size() {
        let mut buf = BytesMut::from(&[0b0000_0001u8, 0b0000_0010u8][..]);
        buf.extend(&[1u8, 1u8]);
        buf.extend([1u8, 1u8]);

        assert!(Parser::parse(&mut buf, true, 1).is_err());

@@ -337,6 +336,30 @@
        }
    }

    #[test]
    fn test_parse_frame_max_size_recoverability() {
        let mut buf = BytesMut::new();
        // The first text frame with length == 2, payload doesn't matter.
        buf.extend([0b0000_0001u8, 0b0000_0010u8, 0b0000_0000u8, 0b0000_0000u8]);
        // Next binary frame with length == 2 and payload == `[0x1111_1111u8, 0x1111_1111u8]`.
        buf.extend([0b0000_0010u8, 0b0000_0010u8, 0b1111_1111u8, 0b1111_1111u8]);

        assert_eq!(buf.len(), 8);
        assert!(matches!(
            Parser::parse(&mut buf, false, 1),
            Err(ProtocolError::Overflow)
        ));
        assert_eq!(buf.len(), 4);
        let frame = extract(Parser::parse(&mut buf, false, 2));
        assert!(!frame.finished);
        assert_eq!(frame.opcode, OpCode::Binary);
        assert_eq!(
            frame.payload,
            Bytes::from(vec![0b1111_1111u8, 0b1111_1111u8])
        );
        assert_eq!(buf.len(), 0);
    }

    #[test]
    fn test_ping_frame() {
        let mut buf = BytesMut::new();
@@ -47,40 +47,6 @@ pub fn apply_mask_fast32(buf: &mut [u8], mask: [u8; 4]) {
mod tests {
    use super::*;

    // legacy test from old apply mask test. kept for now for back compat test.
    // TODO: remove it and favor the other test.
    #[test]
    fn test_apply_mask_legacy() {
        let mask = [0x6d, 0xb6, 0xb2, 0x80];

        let unmasked = vec![
            0xf3, 0x00, 0x01, 0x02, 0x03, 0x80, 0x81, 0x82, 0xff, 0xfe, 0x00, 0x17, 0x74, 0xf9,
            0x12, 0x03,
        ];

        // Check masking with proper alignment.
        {
            let mut masked = unmasked.clone();
            apply_mask_fallback(&mut masked, mask);

            let mut masked_fast = unmasked.clone();
            apply_mask(&mut masked_fast, mask);

            assert_eq!(masked, masked_fast);
        }

        // Check masking without alignment.
        {
            let mut masked = unmasked.clone();
            apply_mask_fallback(&mut masked[1..], mask);

            let mut masked_fast = unmasked;
            apply_mask(&mut masked_fast[1..], mask);

            assert_eq!(masked, masked_fast);
        }
    }

    #[test]
    fn test_apply_mask() {
        let mask = [0x6d, 0xb6, 0xb2, 0x80];
@@ -67,7 +67,7 @@ pub enum ProtocolError {
}

/// WebSocket handshake errors
#[derive(Debug, Clone, Copy, PartialEq, Display, Error)]
#[derive(Debug, Clone, Copy, PartialEq, Eq, Display, Error)]
pub enum HandshakeError {
    /// Only get method is allowed.
    #[display(fmt = "Method not allowed.")]
@@ -3,6 +3,9 @@ use std::{
    fmt,
};

use base64::prelude::*;
use tracing::error;

/// Operation codes defined in [RFC 6455 §11.8].
///
/// [RFC 6455]: https://datatracker.ietf.org/doc/html/rfc6455#section-11.8
@@ -58,7 +61,7 @@ impl From<OpCode> for u8 {
            Ping => 9,
            Pong => 10,
            Bad => {
                log::error!("Attempted to convert invalid opcode to u8. This is a bug.");
                error!("Attempted to convert invalid opcode to u8. This is a bug.");
                8 // if this somehow happens, a close frame will help us tear down quickly
            }
        }
@@ -242,7 +245,7 @@ pub fn hash_key(key: &[u8]) -> [u8; 28] {
    };

    let mut hash_b64 = [0; 28];
    let n = base64::encode_config_slice(&hash, base64::STANDARD, &mut hash_b64);
    let n = BASE64_STANDARD.encode_slice(hash, &mut hash_b64).unwrap();
    assert_eq!(n, 28);

    hash_b64
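The `hash_key` change tracks the `base64` crate's move from free functions to engine methods. A small standalone sketch of the same slice-encoding call, assuming base64 0.21+ with its default features:

```rust
use base64::prelude::*;

fn main() {
    // 20 input bytes (e.g. a SHA-1 digest) always encode to 28 base64 bytes.
    let digest = [0u8; 20];
    let mut encoded = [0u8; 28];

    let n = BASE64_STANDARD
        .encode_slice(digest, &mut encoded)
        .expect("output buffer is large enough");

    assert_eq!(n, 28);
}
```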
@@ -31,7 +31,7 @@ const STR: &str = "Hello World Hello World Hello World Hello World Hello World \
    Hello World Hello World Hello World Hello World Hello World";

#[actix_rt::test]
async fn test_h1_v2() {
async fn h1_v2() {
    let srv = test_server(move || {
        HttpService::build()
            .finish(|_| future::ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -59,7 +59,7 @@ async fn test_h1_v2() {
}

#[actix_rt::test]
async fn test_connection_close() {
async fn connection_close() {
    let srv = test_server(move || {
        HttpService::build()
            .finish(|_| future::ok::<_, Infallible>(Response::ok().set_body(STR)))
@@ -73,7 +73,7 @@ async fn test_connection_close() {
}

#[actix_rt::test]
async fn test_with_query_parameter() {
async fn with_query_parameter() {
    let srv = test_server(move || {
        HttpService::build()
            .finish(|req: Request| async move {
@@ -104,7 +104,7 @@ impl From<ExpectFailed> for Response<BoxBody> {
}

#[actix_rt::test]
async fn test_h1_expect() {
async fn h1_expect() {
    let srv = test_server(move || {
        HttpService::build()
            .expect(|req: Request| async {
@@ -1,4 +1,4 @@
use std::io;
use std::{io, time::Duration};

use actix_http::{error::Error, HttpService, Response};
use actix_server::Server;
@@ -19,7 +19,7 @@ async fn h2_ping_pong() -> io::Result<()> {
        .workers(1)
        .listen("h2_ping_pong", lst, || {
            HttpService::build()
                .keep_alive(3)
                .keep_alive(Duration::from_secs(3))
                .h2(|_| async { Ok::<_, Error>(Response::ok()) })
                .tcp()
        })?
@@ -92,10 +92,10 @@ async fn h2_handshake_timeout() -> io::Result<()> {
        .workers(1)
        .listen("h2_ping_pong", lst, || {
            HttpService::build()
                .keep_alive(30)
                .keep_alive(Duration::from_secs(30))
                // set first request timeout to 5 seconds.
                // this is the timeout used for http2 handshake.
                .client_timeout(5000)
                .client_request_timeout(Duration::from_secs(5))
                .h2(|_| async { Ok::<_, Error>(Response::ok()) })
                .tcp()
        })?
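These test updates follow the builder's switch from bare integers (seconds or milliseconds) to `std::time::Duration` values. A minimal sketch of the updated configuration calls in isolation, assuming the actix-http 3.x `HttpServiceBuilder` methods shown in the diff and `futures-util` for the ready future, as the existing tests use:

```rust
use std::{convert::Infallible, time::Duration};

use actix_http::{HttpService, Response};
use futures_util::future;

fn configured_service() {
    // Durations make the units explicit where the old API took plain integers
    // (`keep_alive(3)`, `client_timeout(5000)`).
    let _factory = HttpService::build()
        .keep_alive(Duration::from_secs(30))
        .client_request_timeout(Duration::from_secs(5))
        .finish(|_| future::ok::<_, Infallible>(Response::ok()))
        .tcp();
}
```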
Some files were not shown because too many files have changed in this diff.