diff --git a/.cargo/config.toml b/.cargo/config.toml deleted file mode 100644 index 931b20b2c..000000000 --- a/.cargo/config.toml +++ /dev/null @@ -1,14 +0,0 @@ -[alias] -lint = "clippy --workspace --tests --examples --bins -- -Dclippy::todo" -lint-all = "clippy --workspace --all-features --tests --examples --bins -- -Dclippy::todo" - -# lib checking -ci-check-min = "hack --workspace check --no-default-features" -ci-check-default = "hack --workspace check" -ci-check-default-tests = "check --workspace --tests" -ci-check-all-feature-powerset="hack --workspace --feature-powerset --skip=__compress,experimental-io-uring check" -ci-check-all-feature-powerset-linux="hack --workspace --feature-powerset --skip=__compress check" - -# testing -ci-doctest-default = "test --workspace --doc --no-fail-fast -- --nocapture" -ci-doctest = "test --workspace --all-features --doc --no-fail-fast -- --nocapture" diff --git a/.github/workflows/ci-post-merge.yml b/.github/workflows/ci-post-merge.yml index 4c4a3404f..c1afdb1cc 100644 --- a/.github/workflows/ci-post-merge.yml +++ b/.github/workflows/ci-post-merge.yml @@ -30,49 +30,41 @@ jobs: steps: - uses: actions/checkout@v4 + - name: Install nasm + if: matrix.target.os == 'windows-latest' + uses: ilammy/setup-nasm@v1.5.1 + - name: Install OpenSSL if: matrix.target.os == 'windows-latest' - run: choco install openssl -y --forcex64 --no-progress - - name: Set OpenSSL dir in env - if: matrix.target.os == 'windows-latest' + shell: bash run: | - echo 'OPENSSL_DIR=C:\Program Files\OpenSSL-Win64' | Out-File -FilePath $env:GITHUB_ENV -Append - echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' | Out-File -FilePath $env:GITHUB_ENV -Append + set -e + choco install openssl --version=1.1.1.2100 -y --no-progress + echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV + echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV - name: Install Rust (${{ matrix.version.name }}) - uses: actions-rust-lang/setup-rust-toolchain@v1.6.0 + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 with: toolchain: ${{ matrix.version.version }} - - name: Install cargo-hack - uses: taiki-e/install-action@v2.22.0 + - name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean + uses: taiki-e/install-action@v2.42.17 with: - tool: cargo-hack + tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean - name: check minimal - run: cargo ci-check-min + run: just check-min - name: check default - run: cargo ci-check-default + run: just check-default - name: tests timeout-minutes: 60 - run: | - cargo test --lib --tests -p=actix-router --all-features - cargo test --lib --tests -p=actix-http --all-features - cargo test --lib --tests -p=actix-web --features=rustls-0_20,rustls-0_21,openssl -- --skip=test_reading_deflate_encoding_large_random_rustls - cargo test --lib --tests -p=actix-web-codegen --all-features - cargo test --lib --tests -p=awc --all-features - cargo test --lib --tests -p=actix-http-test --all-features - cargo test --lib --tests -p=actix-test --all-features - cargo test --lib --tests -p=actix-files - cargo test --lib --tests -p=actix-multipart --all-features - cargo test --lib --tests -p=actix-web-actors --all-features + run: just test - - name: Clear the cargo caches - run: | - cargo install cargo-cache --version 0.8.3 --no-default-features --features ci-autoclean - cargo-cache + - name: CI cache clean + run: cargo-ci-cache-clean ci_feature_powerset_check: name: Verify Feature Combinations @@ -81,34 +73,19 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Install Rust - uses: 
actions-rust-lang/setup-rust-toolchain@v1.6.0 + - name: Free Disk Space + run: ./scripts/free-disk-space.sh - - name: Install cargo-hack - uses: taiki-e/install-action@v2.22.0 - with: - tool: cargo-hack - - - name: check feature combinations - run: cargo ci-check-all-feature-powerset - - - name: check feature combinations - run: cargo ci-check-all-feature-powerset-linux - - nextest: - name: nextest - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 + - name: Setup mold linker + uses: rui314/setup-mold@v1 - name: Install Rust - uses: actions-rust-lang/setup-rust-toolchain@v1.6.0 + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 - - name: Install nextest - uses: taiki-e/install-action@v2.22.0 + - name: Install just, cargo-hack + uses: taiki-e/install-action@v2.42.17 with: - tool: nextest + tool: just,cargo-hack - - name: Test with cargo-nextest - run: cargo nextest run + - name: Check feature combinations + run: just check-feature-combinations diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 5627865c1..f98e17a87 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -16,7 +16,13 @@ concurrency: cancel-in-progress: true jobs: + read_msrv: + name: Read MSRV + uses: actions-rust-lang/msrv/.github/workflows/msrv.yml@v0.1.0 + build_and_test: + needs: read_msrv + strategy: fail-fast: false matrix: @@ -26,7 +32,7 @@ jobs: - { name: macOS, os: macos-latest, triple: x86_64-apple-darwin } - { name: Windows, os: windows-latest, triple: x86_64-pc-windows-msvc } version: - - { name: msrv, version: 1.68.0 } + - { name: msrv, version: "${{ needs.read_msrv.outputs.msrv }}" } - { name: stable, version: stable } name: ${{ matrix.target.name }} / ${{ matrix.version.name }} @@ -35,56 +41,49 @@ jobs: steps: - uses: actions/checkout@v4 + - name: Install nasm + if: matrix.target.os == 'windows-latest' + uses: ilammy/setup-nasm@v1.5.1 + - name: Install OpenSSL if: matrix.target.os == 'windows-latest' - run: choco install openssl -y --forcex64 --no-progress - - name: Set OpenSSL dir in env - if: matrix.target.os == 'windows-latest' + shell: bash run: | - echo 'OPENSSL_DIR=C:\Program Files\OpenSSL-Win64' | Out-File -FilePath $env:GITHUB_ENV -Append - echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' | Out-File -FilePath $env:GITHUB_ENV -Append + set -e + choco install openssl --version=1.1.1.2100 -y --no-progress + echo 'OPENSSL_DIR=C:\Program Files\OpenSSL' >> $GITHUB_ENV + echo "RUSTFLAGS=-C target-feature=+crt-static" >> $GITHUB_ENV + + - name: Setup mold linker + if: matrix.target.os == 'ubuntu-latest' + uses: rui314/setup-mold@v1 - name: Install Rust (${{ matrix.version.name }}) - uses: actions-rust-lang/setup-rust-toolchain@v1.6.0 + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 with: toolchain: ${{ matrix.version.version }} - - name: Install cargo-hack - uses: taiki-e/install-action@v2.22.0 + - name: Install just, cargo-hack, cargo-nextest, cargo-ci-cache-clean + uses: taiki-e/install-action@v2.42.17 with: - tool: cargo-hack + tool: just,cargo-hack,cargo-nextest,cargo-ci-cache-clean - name: workaround MSRV issues if: matrix.version.name == 'msrv' - run: | - cargo update -p=clap --precise=4.3.24 - cargo update -p=clap_lex --precise=0.5.0 - cargo update -p=anstyle --precise=1.0.2 + run: just downgrade-for-msrv - name: check minimal - run: cargo ci-check-min + run: just check-min - name: check default - run: cargo ci-check-default + run: just check-default - name: tests timeout-minutes: 60 - run: | - cargo test --lib --tests -p=actix-router --all-features - 
cargo test --lib --tests -p=actix-http --all-features - cargo test --lib --tests -p=actix-web --features=rustls-0_20,rustls-0_21,openssl -- --skip=test_reading_deflate_encoding_large_random_rustls - cargo test --lib --tests -p=actix-web-codegen --all-features - cargo test --lib --tests -p=awc --all-features - cargo test --lib --tests -p=actix-http-test --all-features - cargo test --lib --tests -p=actix-test --all-features - cargo test --lib --tests -p=actix-files - cargo test --lib --tests -p=actix-multipart --all-features - cargo test --lib --tests -p=actix-web-actors --all-features + run: just test - - name: Clear the cargo caches - run: | - cargo install cargo-cache --version 0.8.3 --no-default-features --features ci-autoclean - cargo-cache + - name: CI cache clean + run: cargo-ci-cache-clean io-uring: name: io-uring tests @@ -93,7 +92,7 @@ jobs: - uses: actions/checkout@v4 - name: Install Rust - uses: actions-rust-lang/setup-rust-toolchain@v1.6.0 + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 with: toolchain: nightly @@ -109,10 +108,14 @@ jobs: - uses: actions/checkout@v4 - name: Install Rust (nightly) - uses: actions-rust-lang/setup-rust-toolchain@v1.6.0 + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 with: toolchain: nightly + - name: Install just + uses: taiki-e/install-action@v2.42.17 + with: + tool: just + - name: doc tests - run: cargo ci-doctest - timeout-minutes: 60 + run: just test-docs diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index c451df22a..f30c8ff42 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -17,21 +17,24 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Install Rust - uses: actions-rust-lang/setup-rust-toolchain@v1.6.0 + - name: Install Rust (nightly) + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 with: - components: llvm-tools-preview + toolchain: nightly + components: llvm-tools - - name: Install cargo-llvm-cov - uses: taiki-e/install-action@v2.22.0 + - name: Install just, cargo-llvm-cov, cargo-nextest + uses: taiki-e/install-action@v2.42.17 with: - tool: cargo-llvm-cov + tool: just,cargo-llvm-cov,cargo-nextest - name: Generate code coverage - run: cargo llvm-cov --workspace --all-features --codecov --output-path codecov.json + run: just test-coverage-codecov - name: Upload coverage to Codecov - uses: codecov/codecov-action@v3.1.4 + uses: codecov/codecov-action@v4.5.0 with: files: codecov.json fail_ci_if_error: true + env: + CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }} diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 13362dbb0..f3bdb3515 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -17,12 +17,13 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: actions-rust-lang/setup-rust-toolchain@v1.6.0 + - name: Install Rust (nightly) + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 with: toolchain: nightly components: rustfmt - - name: Check with rustfmt + - name: Check with Rustfmt run: cargo fmt --all -- --check clippy: @@ -35,7 +36,7 @@ jobs: - uses: actions/checkout@v4 - name: Install Rust - uses: actions-rust-lang/setup-rust-toolchain@v1.6.0 + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 with: components: clippy @@ -53,7 +54,8 @@ jobs: steps: - uses: actions/checkout@v4 - - uses: actions-rust-lang/setup-rust-toolchain@v1.6.0 + - name: Install Rust (nightly) + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 with: toolchain: nightly components: rust-docs @@ -63,25 +65,52 @@ jobs: RUSTDOCFLAGS: -D warnings 
run: cargo +nightly doc --no-deps --workspace --all-features - public-api-diff: + check-external-types: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4 + + - name: Install Rust (nightly-2024-05-01) + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 + with: + toolchain: nightly-2024-05-01 + + - name: Install just + uses: taiki-e/install-action@v2.42.17 + with: + tool: just + + - name: Install cargo-check-external-types + uses: taiki-e/cache-cargo-install-action@v2.0.1 + with: + tool: cargo-check-external-types + + - name: check external types + run: just check-external-types-all +nightly-2024-05-01 + + public-api-diff: + runs-on: ubuntu-latest + steps: + - name: Checkout main branch + uses: actions/checkout@v4 with: ref: ${{ github.base_ref }} - - uses: actions/checkout@v4 + - name: Checkout PR branch + uses: actions/checkout@v4 - - uses: actions-rust-lang/setup-rust-toolchain@v1.6.0 + - name: Install Rust (nightly-2024-06-07) + uses: actions-rust-lang/setup-rust-toolchain@v1.9.0 with: - toolchain: nightly-2023-08-25 + toolchain: nightly-2024-06-07 - - uses: taiki-e/cache-cargo-install-action@v1.3.0 + - name: Install cargo-public-api + uses: taiki-e/install-action@v2.42.17 with: tool: cargo-public-api - - name: generate API diff + - name: Generate API diff run: | for f in $(find -mindepth 2 -maxdepth 2 -name Cargo.toml); do - cargo public-api --manifest-path "$f" diff ${{ github.event.pull_request.base.sha }}..${{ github.sha }} + cargo public-api --manifest-path "$f" --simplified diff ${{ github.event.pull_request.base.sha }}..${{ github.sha }} done diff --git a/.github/workflows/upload-doc.yml b/.github/workflows/upload-doc.yml deleted file mode 100644 index f4bd0ceeb..000000000 --- a/.github/workflows/upload-doc.yml +++ /dev/null @@ -1,41 +0,0 @@ -name: Upload Documentation - -on: - push: - branches: [master] - -permissions: - contents: read - -concurrency: - group: ${{ github.workflow }}-${{ github.ref }} - cancel-in-progress: true - -jobs: - build: - permissions: - contents: write - - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v4 - - - name: Install Rust - uses: actions-rust-lang/setup-rust-toolchain@v1.6.0 - with: - toolchain: nightly - - - name: Build Docs - run: cargo +nightly doc --no-deps --workspace --all-features - env: - RUSTDOCFLAGS: --cfg=docsrs - - - name: Tweak HTML - run: echo '' > target/doc/index.html - - - name: Deploy to GitHub Pages - uses: JamesIves/github-pages-deploy-action@v4.5.0 - with: - folder: target/doc - single-commit: true diff --git a/.prettierrc.yml b/.prettierrc.yml index b61fd8974..d70303479 100644 --- a/.prettierrc.yml +++ b/.prettierrc.yml @@ -1,5 +1,5 @@ overrides: - - files: '*.md' + - files: "*.md" options: printWidth: 9999 proseWrap: never diff --git a/Cargo.toml b/Cargo.toml index 58fd96935..51f998314 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -15,9 +15,11 @@ members = [ ] [workspace.package] +homepage = "https://actix.rs" +repository = "https://github.com/actix/actix-web" license = "MIT OR Apache-2.0" edition = "2021" -rust-version = "1.68" +rust-version = "1.72" [profile.dev] # Disabling debug info speeds up builds a bunch and we don't rely on it for debugging that much. 
@@ -49,3 +51,11 @@ awc = { path = "awc" } # actix-utils = { path = "../actix-net/actix-utils" } # actix-tls = { path = "../actix-net/actix-tls" } # actix-server = { path = "../actix-net/actix-server" } + +[workspace.lints.rust] +rust_2018_idioms = { level = "deny" } +future_incompatible = { level = "deny" } +nonstandard_style = { level = "deny" } + +[workspace.lints.clippy] +# clone_on_ref_ptr = { level = "deny" } diff --git a/actix-files/CHANGES.md b/actix-files/CHANGES.md index 15c2958f0..e94f43907 100644 --- a/actix-files/CHANGES.md +++ b/actix-files/CHANGES.md @@ -2,6 +2,18 @@ ## Unreleased +## 0.6.6 + +- Update `tokio-uring` dependency to `0.4`. +- Minimum supported Rust version (MSRV) is now 1.72. + +## 0.6.5 + +- Fix handling of special characters in filenames. + +## 0.6.4 + +- Fix handling of newlines in filenames. - Minimum supported Rust version (MSRV) is now 1.68 due to transitive `time` dependency. ## 0.6.3 diff --git a/actix-files/Cargo.toml b/actix-files/Cargo.toml index efecb0889..0c02359c4 100644 --- a/actix-files/Cargo.toml +++ b/actix-files/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-files" -version = "0.6.3" +version = "0.6.6" authors = [ "Nikolay Kim ", "Rob Ede ", @@ -13,9 +13,14 @@ categories = ["asynchronous", "web-programming::http-server"] license = "MIT OR Apache-2.0" edition = "2021" -[lib] -name = "actix_files" -path = "src/lib.rs" +[package.metadata.cargo_check_external_types] +allowed_external_types = [ + "actix_http::*", + "actix_service::*", + "actix_web::*", + "http::*", + "mime::*", +] [features] experimental-io-uring = ["actix-web/experimental-io-uring", "tokio-uring"] @@ -40,12 +45,15 @@ v_htmlescape = "0.15.5" # experimental-io-uring [target.'cfg(target_os = "linux")'.dependencies] -tokio-uring = { version = "0.4", optional = true, features = ["bytes"] } -actix-server = { version = "2.2", optional = true } # ensure matching tokio-uring versions +tokio-uring = { version = "0.5", optional = true, features = ["bytes"] } +actix-server = { version = "2.4", optional = true } # ensure matching tokio-uring versions [dev-dependencies] actix-rt = "2.7" actix-test = "0.1" actix-web = "4" -env_logger = "0.10" +env_logger = "0.11" tempfile = "3.2" + +[lints] +workspace = true diff --git a/actix-files/README.md b/actix-files/README.md index 3e656c431..f6d5143f5 100644 --- a/actix-files/README.md +++ b/actix-files/README.md @@ -1,18 +1,32 @@ -# actix-files +# `actix-files` -> Static file serving for Actix Web + [![crates.io](https://img.shields.io/crates/v/actix-files?label=latest)](https://crates.io/crates/actix-files) -[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.3)](https://docs.rs/actix-files/0.6.3) -![Version](https://img.shields.io/badge/rustc-1.68+-ab6000.svg) +[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.6)](https://docs.rs/actix-files/0.6.6) +![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) ![License](https://img.shields.io/crates/l/actix-files.svg)
-[![dependency status](https://deps.rs/crate/actix-files/0.6.3/status.svg)](https://deps.rs/crate/actix-files/0.6.3) +[![dependency status](https://deps.rs/crate/actix-files/0.6.6/status.svg)](https://deps.rs/crate/actix-files/0.6.6) [![Download](https://img.shields.io/crates/d/actix-files.svg)](https://crates.io/crates/actix-files) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) -## Documentation & Resources + -- [API Documentation](https://docs.rs/actix-files) -- [Example Project](https://github.com/actix/examples/tree/master/basics/static-files) -- Minimum Supported Rust Version (MSRV): 1.68 + + +Static file serving for Actix Web. + +Provides a non-blocking service for serving static files from disk. + +## Examples + +```rust +use actix_web::App; +use actix_files::Files; + +let app = App::new() + .service(Files::new("/static", ".").prefer_utf8(true)); +``` + + diff --git a/actix-files/src/lib.rs b/actix-files/src/lib.rs index 943130e16..551a14fa4 100644 --- a/actix-files/src/lib.rs +++ b/actix-files/src/lib.rs @@ -11,8 +11,7 @@ //! .service(Files::new("/static", ".").prefer_utf8(true)); //! ``` -#![deny(rust_2018_idioms, nonstandard_style)] -#![warn(future_incompatible, missing_docs, missing_debug_implementations)] +#![warn(missing_docs, missing_debug_implementations)] #![doc(html_logo_url = "https://actix.rs/img/logo.png")] #![doc(html_favicon_url = "https://actix.rs/favicon.ico")] #![cfg_attr(docsrs, feature(doc_auto_cfg))] @@ -75,7 +74,7 @@ mod tests { dev::ServiceFactory, guard, http::{ - header::{self, ContentDisposition, DispositionParam, DispositionType}, + header::{self, ContentDisposition, DispositionParam}, Method, StatusCode, }, middleware::Compress, @@ -307,11 +306,11 @@ mod tests { let resp = file.respond_to(&req); assert_eq!( resp.headers().get(header::CONTENT_TYPE).unwrap(), - "application/javascript; charset=utf-8" + "text/javascript", ); assert_eq!( resp.headers().get(header::CONTENT_DISPOSITION).unwrap(), - "inline; filename=\"test.js\"" + "inline; filename=\"test.js\"", ); } @@ -568,6 +567,30 @@ mod tests { assert_eq!(bytes, data); } + #[cfg(not(target_os = "windows"))] + #[actix_rt::test] + async fn test_static_files_with_special_characters() { + // Create the file we want to test against ad-hoc. We can't check it in as otherwise + // Windows can't even checkout this repository. 
+ let temp_dir = tempfile::tempdir().unwrap(); + let file_with_newlines = temp_dir.path().join("test\n\x0B\x0C\rnewline.text"); + fs::write(&file_with_newlines, "Look at my newlines").unwrap(); + + let srv = test::init_service( + App::new().service(Files::new("/", temp_dir.path()).index_file("Cargo.toml")), + ) + .await; + let request = TestRequest::get() + .uri("/test%0A%0B%0C%0Dnewline.text") + .to_request(); + let response = test::call_service(&srv, request).await; + assert_eq!(response.status(), StatusCode::OK); + + let bytes = test::read_body(response).await; + let data = web::Bytes::from(fs::read(file_with_newlines).unwrap()); + assert_eq!(bytes, data); + } + #[actix_rt::test] async fn test_files_not_allowed() { let srv = test::init_service(App::new().service(Files::new("/", "."))).await; @@ -840,9 +863,9 @@ mod tests { #[actix_rt::test] async fn test_percent_encoding_2() { - let tmpdir = tempfile::tempdir().unwrap(); + let temp_dir = tempfile::tempdir().unwrap(); let filename = match cfg!(unix) { - true => "ض:?#[]{}<>()@!$&'`|*+,;= %20.test", + true => "ض:?#[]{}<>()@!$&'`|*+,;= %20\n.test", false => "ض#[]{}()@!$&'`+,;= %20.test", }; let filename_encoded = filename @@ -852,9 +875,9 @@ mod tests { write!(&mut buf, "%{:02X}", c).unwrap(); buf }); - std::fs::File::create(tmpdir.path().join(filename)).unwrap(); + std::fs::File::create(temp_dir.path().join(filename)).unwrap(); - let srv = test::init_service(App::new().service(Files::new("", tmpdir.path()))).await; + let srv = test::init_service(App::new().service(Files::new("/", temp_dir.path()))).await; let req = TestRequest::get() .uri(&format!("/{}", filename_encoded)) diff --git a/actix-files/src/named.rs b/actix-files/src/named.rs index d7795ba73..9e4a37737 100644 --- a/actix-files/src/named.rs +++ b/actix-files/src/named.rs @@ -24,7 +24,6 @@ use bitflags::bitflags; use derive_more::{Deref, DerefMut}; use futures_core::future::LocalBoxFuture; use mime::Mime; -use mime_guess::from_path; use crate::{encoding::equiv_utf8_text, range::HttpRange}; @@ -128,7 +127,7 @@ impl NamedFile { } }; - let ct = from_path(&path).first_or_octet_stream(); + let ct = mime_guess::from_path(&path).first_or_octet_stream(); let disposition = match ct.type_() { mime::IMAGE | mime::TEXT | mime::AUDIO | mime::VIDEO => DispositionType::Inline, @@ -140,7 +139,13 @@ impl NamedFile { _ => DispositionType::Attachment, }; - let mut parameters = vec![DispositionParam::Filename(String::from(filename.as_ref()))]; + // replace special characters in filenames which could occur on some filesystems + let filename_s = filename + .replace('\n', "%0A") // \n line break + .replace('\x0B', "%0B") // \v vertical tab + .replace('\x0C', "%0C") // \f form feed + .replace('\r', "%0D"); // \r carriage return + let mut parameters = vec![DispositionParam::Filename(filename_s)]; if !filename.is_ascii() { parameters.push(DispositionParam::FilenameExt(ExtendedValue { diff --git a/actix-http-test/CHANGES.md b/actix-http-test/CHANGES.md index 065141b20..4d133e3ec 100644 --- a/actix-http-test/CHANGES.md +++ b/actix-http-test/CHANGES.md @@ -2,6 +2,10 @@ ## Unreleased +- Minimum supported Rust version (MSRV) is now 1.72. + +## 3.2.0 + - Minimum supported Rust version (MSRV) is now 1.68 due to transitive `time` dependency. 
## 3.1.0 diff --git a/actix-http-test/Cargo.toml b/actix-http-test/Cargo.toml index 0881e0bc4..7ccb70a45 100644 --- a/actix-http-test/Cargo.toml +++ b/actix-http-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-http-test" -version = "3.1.0" +version = "3.2.0" authors = ["Nikolay Kim "] description = "Various helpers for Actix applications to use during testing" keywords = ["http", "web", "framework", "async", "futures"] @@ -18,9 +18,17 @@ edition = "2021" [package.metadata.docs.rs] features = [] -[lib] -name = "actix_http_test" -path = "src/lib.rs" +[package.metadata.cargo_check_external_types] +allowed_external_types = [ + "actix_codec::*", + "actix_http::*", + "actix_server::*", + "awc::*", + "bytes::*", + "futures_core::*", + "http::*", + "tokio::*", +] [features] default = [] @@ -51,3 +59,6 @@ tokio = { version = "1.24.2", features = ["sync"] } [dev-dependencies] actix-http = "3" + +[lints] +workspace = true diff --git a/actix-http-test/README.md b/actix-http-test/README.md index 4b286e603..939028121 100644 --- a/actix-http-test/README.md +++ b/actix-http-test/README.md @@ -1,17 +1,20 @@ -# actix-http-test +# `actix-http-test` -> Various helpers for Actix applications to use during testing. + [![crates.io](https://img.shields.io/crates/v/actix-http-test?label=latest)](https://crates.io/crates/actix-http-test) -[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.1.0)](https://docs.rs/actix-http-test/3.1.0) -![Version](https://img.shields.io/badge/rustc-1.68+-ab6000.svg) +[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.2.0)](https://docs.rs/actix-http-test/3.2.0) +![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) ![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http-test)
-[![Dependency Status](https://deps.rs/crate/actix-http-test/3.1.0/status.svg)](https://deps.rs/crate/actix-http-test/3.1.0) +[![Dependency Status](https://deps.rs/crate/actix-http-test/3.2.0/status.svg)](https://deps.rs/crate/actix-http-test/3.2.0) [![Download](https://img.shields.io/crates/d/actix-http-test.svg)](https://crates.io/crates/actix-http-test) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) -## Documentation & Resources + -- [API Documentation](https://docs.rs/actix-http-test) -- Minimum Supported Rust Version (MSRV): 1.68 + + +Various helpers for Actix applications to use during testing. + + diff --git a/actix-http-test/src/lib.rs b/actix-http-test/src/lib.rs index 554af9102..d83b0b3ea 100644 --- a/actix-http-test/src/lib.rs +++ b/actix-http-test/src/lib.rs @@ -1,7 +1,5 @@ //! Various helpers for Actix applications to use during testing. -#![deny(rust_2018_idioms, nonstandard_style)] -#![warn(future_incompatible)] #![doc(html_logo_url = "https://actix.rs/img/logo.png")] #![doc(html_favicon_url = "https://actix.rs/favicon.ico")] #![cfg_attr(docsrs, feature(doc_auto_cfg))] diff --git a/actix-http/CHANGES.md b/actix-http/CHANGES.md index eadd5c515..15b211c1a 100644 --- a/actix-http/CHANGES.md +++ b/actix-http/CHANGES.md @@ -2,14 +2,57 @@ ## Unreleased +## 3.9.0 + +### Added + +- Implement `FromIterator<(HeaderName, HeaderValue)>` for `HeaderMap`. + +## 3.8.0 + +### Added + +- Add `error::InvalidStatusCode` re-export. + +## 3.7.0 + +### Added + +- Add `rustls-0_23` crate feature +- Add `{h1::H1Service, h2::H2Service, HttpService}::rustls_0_23()` and `HttpService::rustls_0_23_with_config()` service constructors. + ### Changed -- Updated `zstd` dependency to `0.13`. -- Implemented `From` for `http::HeaderMap`. +- Update `brotli` dependency to `6`. +- Minimum supported Rust version (MSRV) is now 1.72. + +## 3.6.0 + +### Added + +- Add `rustls-0_22` crate feature. +- Add `{h1::H1Service, h2::H2Service, HttpService}::rustls_0_22()` and `HttpService::rustls_0_22_with_config()` service constructors. +- Implement `From<&HeaderMap>` for `http::HeaderMap`. + +## 3.5.1 ### Fixed -- Do not encode zero-sized response bodies +- Prevent hang when returning zero-sized response bodies through compression layer. + +## 3.5.0 + +### Added + +- Implement `From` for `http::HeaderMap`. + +### Changed + +- Updated `zstd` dependency to `0.13`. + +### Fixed + +- Prevent compression of zero-sized response bodies. 
## 3.4.0 diff --git a/actix-http/Cargo.toml b/actix-http/Cargo.toml index 8476d9086..e8dea0bd0 100644 --- a/actix-http/Cargo.toml +++ b/actix-http/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-http" -version = "3.4.0" +version = "3.9.0" authors = [ "Nikolay Kim ", "Rob Ede ", @@ -20,48 +20,86 @@ edition.workspace = true rust-version.workspace = true [package.metadata.docs.rs] -# features that docs.rs will build with -features = ["http2", "ws", "openssl", "rustls-0_20", "rustls-0_21", "compress-brotli", "compress-gzip", "compress-zstd"] +rustdoc-args = ["--cfg", "docsrs"] +features = [ + "http2", + "ws", + "openssl", + "rustls-0_20", + "rustls-0_21", + "rustls-0_22", + "rustls-0_23", + "compress-brotli", + "compress-gzip", + "compress-zstd", +] -[lib] -name = "actix_http" -path = "src/lib.rs" +[package.metadata.cargo_check_external_types] +allowed_external_types = [ + "actix_codec::*", + "actix_service::*", + "actix_tls::*", + "actix_utils::*", + "bytes::*", + "bytestring::*", + "encoding_rs::*", + "futures_core::*", + "h2::*", + "http::*", + "httparse::*", + "language_tags::*", + "mime::*", + "openssl::*", + "rustls::*", + "tokio_util::*", + "tokio::*", +] [features] default = [] # HTTP/2 protocol support -http2 = ["h2"] +http2 = ["dep:h2"] # WebSocket protocol implementation ws = [ - "local-channel", - "base64", - "rand", - "sha1", + "dep:local-channel", + "dep:base64", + "dep:rand", + "dep:sha1", ] # TLS via OpenSSL -openssl = ["actix-tls/accept", "actix-tls/openssl"] +openssl = ["__tls", "actix-tls/accept", "actix-tls/openssl"] # TLS via Rustls v0.20 -rustls = ["rustls-0_20"] +rustls = ["__tls", "rustls-0_20"] # TLS via Rustls v0.20 -rustls-0_20 = ["actix-tls/accept", "actix-tls/rustls-0_20"] +rustls-0_20 = ["__tls", "actix-tls/accept", "actix-tls/rustls-0_20"] # TLS via Rustls v0.21 -rustls-0_21 = ["actix-tls/accept", "actix-tls/rustls-0_21"] +rustls-0_21 = ["__tls", "actix-tls/accept", "actix-tls/rustls-0_21"] + +# TLS via Rustls v0.22 +rustls-0_22 = ["__tls", "actix-tls/accept", "actix-tls/rustls-0_22"] + +# TLS via Rustls v0.23 +rustls-0_23 = ["__tls", "actix-tls/accept", "actix-tls/rustls-0_23"] # Compression codecs -compress-brotli = ["__compress", "brotli"] -compress-gzip = ["__compress", "flate2"] -compress-zstd = ["__compress", "zstd"] +compress-brotli = ["__compress", "dep:brotli"] +compress-gzip = ["__compress", "dep:flate2"] +compress-zstd = ["__compress", "dep:zstd"] # Internal (PRIVATE!) features used to aid testing and checking feature status. # Don't rely on these whatsoever. They are semver-exempt and may disappear at anytime. __compress = [] +# Internal (PRIVATE!) features used to aid checking feature status. +# Don't rely on these whatsoever. They may disappear at anytime. 
+__tls = [] + [dependencies] actix-service = "2" actix-codec = "0.5" @@ -89,54 +127,62 @@ tokio-util = { version = "0.7", features = ["io", "codec"] } tracing = { version = "0.1.30", default-features = false, features = ["log"] } # http2 -h2 = { version = "0.3.17", optional = true } +h2 = { version = "0.3.26", optional = true } # websockets local-channel = { version = "0.1", optional = true } -base64 = { version = "0.21", optional = true } +base64 = { version = "0.22", optional = true } rand = { version = "0.8", optional = true } sha1 = { version = "0.10", optional = true } # openssl/rustls -actix-tls = { version = "3.1", default-features = false, optional = true } +actix-tls = { version = "3.4", default-features = false, optional = true } # compress-* -brotli = { version = "3.3.3", optional = true } +brotli = { version = "6", optional = true } flate2 = { version = "1.0.13", optional = true } zstd = { version = "0.13", optional = true } [dev-dependencies] actix-http-test = { version = "3", features = ["openssl"] } actix-server = "2" -actix-tls = { version = "3.1", features = ["openssl"] } +actix-tls = { version = "3.4", features = ["openssl", "rustls-0_23-webpki-roots"] } actix-web = "4" async-stream = "0.3" criterion = { version = "0.5", features = ["html_reports"] } -env_logger = "0.10" +divan = "0.1.8" +env_logger = "0.11" futures-util = { version = "0.3.17", default-features = false, features = ["alloc"] } memchr = "2.4" once_cell = "1.9" -rcgen = "0.11" +rcgen = "0.13" regex = "1.3" rustversion = "1" -rustls-pemfile = "1" +rustls-pemfile = "2" serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" static_assertions = "1" tls-openssl = { package = "openssl", version = "0.10.55" } -tls-rustls_021 = { package = "rustls", version = "0.21" } +tls-rustls_023 = { package = "rustls", version = "0.23" } tokio = { version = "1.24.2", features = ["net", "rt", "macros"] } +[lints] +workspace = true + [[example]] name = "ws" -required-features = ["ws", "rustls-0_21"] +required-features = ["ws", "rustls-0_23"] [[example]] name = "tls_rustls" -required-features = ["http2", "rustls-0_21"] +required-features = ["http2", "rustls-0_23"] [[bench]] name = "response-body-compression" harness = false required-features = ["compress-brotli", "compress-gzip", "compress-zstd"] + +[[bench]] +name = "date-formatting" +harness = false diff --git a/actix-http/README.md b/actix-http/README.md index 7d499f4b3..f78ea86f5 100644 --- a/actix-http/README.md +++ b/actix-http/README.md @@ -5,21 +5,16 @@ [![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http) -[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.4.0)](https://docs.rs/actix-http/3.4.0) -![Version](https://img.shields.io/badge/rustc-1.68+-ab6000.svg) +[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.9.0)](https://docs.rs/actix-http/3.9.0) +![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) ![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http.svg)
-[![dependency status](https://deps.rs/crate/actix-http/3.4.0/status.svg)](https://deps.rs/crate/actix-http/3.4.0) +[![dependency status](https://deps.rs/crate/actix-http/3.9.0/status.svg)](https://deps.rs/crate/actix-http/3.9.0) [![Download](https://img.shields.io/crates/d/actix-http.svg)](https://crates.io/crates/actix-http) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) -## Documentation & Resources - -- [API Documentation](https://docs.rs/actix-http) -- Minimum Supported Rust Version (MSRV): 1.68 - ## Examples ```rust diff --git a/actix-http/benches/date-formatting.rs b/actix-http/benches/date-formatting.rs new file mode 100644 index 000000000..26d0f3daa --- /dev/null +++ b/actix-http/benches/date-formatting.rs @@ -0,0 +1,20 @@ +use std::time::SystemTime; + +use actix_http::header::HttpDate; +use divan::{black_box, AllocProfiler, Bencher}; + +#[global_allocator] +static ALLOC: AllocProfiler = AllocProfiler::system(); + +#[divan::bench] +fn date_formatting(b: Bencher<'_, '_>) { + let now = SystemTime::now(); + + b.bench(|| { + black_box(HttpDate::from(black_box(now)).to_string()); + }) +} + +fn main() { + divan::main(); +} diff --git a/actix-http/examples/h2c-detect.rs b/actix-http/examples/h2c-detect.rs index aa3dd5d31..b0bde3fe6 100644 --- a/actix-http/examples/h2c-detect.rs +++ b/actix-http/examples/h2c-detect.rs @@ -8,7 +8,7 @@ use std::{convert::Infallible, io}; -use actix_http::{HttpService, Request, Response, StatusCode}; +use actix_http::{body::BodyStream, HttpService, Request, Response, StatusCode}; use actix_server::Server; #[tokio::main(flavor = "current_thread")] @@ -19,7 +19,12 @@ async fn main() -> io::Result<()> { .bind("h2c-detect", ("127.0.0.1", 8080), || { HttpService::build() .finish(|_req: Request| async move { - Ok::<_, Infallible>(Response::build(StatusCode::OK).body("Hello!")) + Ok::<_, Infallible>(Response::build(StatusCode::OK).body(BodyStream::new( + futures_util::stream::iter([ + Ok::<_, String>("123".into()), + Err("wertyuikmnbvcxdfty6t".to_owned()), + ]), + ))) }) .tcp_auto_h2c() })? diff --git a/actix-http/examples/tls_rustls.rs b/actix-http/examples/tls_rustls.rs index fbb55c6a4..17303c556 100644 --- a/actix-http/examples/tls_rustls.rs +++ b/actix-http/examples/tls_rustls.rs @@ -12,7 +12,7 @@ //! Protocol: HTTP/1.1 //! ``` -extern crate tls_rustls_021 as rustls; +extern crate tls_rustls_023 as rustls; use std::io; @@ -36,31 +36,34 @@ async fn main() -> io::Result<()> { ); ok::<_, Error>(Response::ok().set_body(body)) }) - .rustls_021(rustls_config()) + .rustls_0_23(rustls_config()) })? 
.run() .await } fn rustls_config() -> rustls::ServerConfig { - let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap(); - let cert_file = cert.serialize_pem().unwrap(); - let key_file = cert.serialize_private_key_pem(); + let rcgen::CertifiedKey { cert, key_pair } = + rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap(); + let cert_file = cert.pem(); + let key_file = key_pair.serialize_pem(); let cert_file = &mut io::BufReader::new(cert_file.as_bytes()); let key_file = &mut io::BufReader::new(key_file.as_bytes()); let cert_chain = rustls_pemfile::certs(cert_file) - .unwrap() - .into_iter() - .map(rustls::Certificate) - .collect(); - let mut keys = rustls_pemfile::pkcs8_private_keys(key_file).unwrap(); + .collect::, _>>() + .unwrap(); + let mut keys = rustls_pemfile::pkcs8_private_keys(key_file) + .collect::, _>>() + .unwrap(); let mut config = rustls::ServerConfig::builder() - .with_safe_defaults() .with_no_client_auth() - .with_single_cert(cert_chain, rustls::PrivateKey(keys.remove(0))) + .with_single_cert( + cert_chain, + rustls::pki_types::PrivateKeyDer::Pkcs8(keys.remove(0)), + ) .unwrap(); const H1_ALPN: &[u8] = b"http/1.1"; diff --git a/actix-http/examples/ws.rs b/actix-http/examples/ws.rs index 241175ae2..fb86bc5ea 100644 --- a/actix-http/examples/ws.rs +++ b/actix-http/examples/ws.rs @@ -1,7 +1,7 @@ //! Sets up a WebSocket server over TCP and TLS. //! Sends a heartbeat message every 4 seconds but does not respond to any incoming frames. -extern crate tls_rustls_021 as rustls; +extern crate tls_rustls_023 as rustls; use std::{ io, @@ -30,7 +30,7 @@ async fn main() -> io::Result<()> { .bind("tls", ("127.0.0.1", 8443), || { HttpService::build() .finish(handler) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) })? 
.run() .await @@ -85,27 +85,27 @@ impl Stream for Heartbeat { fn tls_config() -> rustls::ServerConfig { use std::io::BufReader; - use rustls::{Certificate, PrivateKey}; use rustls_pemfile::{certs, pkcs8_private_keys}; - let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap(); - let cert_file = cert.serialize_pem().unwrap(); - let key_file = cert.serialize_private_key_pem(); + let rcgen::CertifiedKey { cert, key_pair } = + rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap(); + let cert_file = cert.pem(); + let key_file = key_pair.serialize_pem(); let cert_file = &mut BufReader::new(cert_file.as_bytes()); let key_file = &mut BufReader::new(key_file.as_bytes()); - let cert_chain = certs(cert_file) - .unwrap() - .into_iter() - .map(Certificate) - .collect(); - let mut keys = pkcs8_private_keys(key_file).unwrap(); + let cert_chain = certs(cert_file).collect::, _>>().unwrap(); + let mut keys = pkcs8_private_keys(key_file) + .collect::, _>>() + .unwrap(); let mut config = rustls::ServerConfig::builder() - .with_safe_defaults() .with_no_client_auth() - .with_single_cert(cert_chain, PrivateKey(keys.remove(0))) + .with_single_cert( + cert_chain, + rustls::pki_types::PrivateKeyDer::Pkcs8(keys.remove(0)), + ) .unwrap(); config.alpn_protocols.push(b"http/1.1".to_vec()); diff --git a/actix-http/src/body/message_body.rs b/actix-http/src/body/message_body.rs index c3f55ce7d..739fe5027 100644 --- a/actix-http/src/body/message_body.rs +++ b/actix-http/src/body/message_body.rs @@ -531,7 +531,6 @@ where mod tests { use actix_rt::pin; use actix_utils::future::poll_fn; - use bytes::{Bytes, BytesMut}; use futures_util::stream; use super::*; diff --git a/actix-http/src/date.rs b/actix-http/src/date.rs index 1358bbd8c..735dd9100 100644 --- a/actix-http/src/date.rs +++ b/actix-http/src/date.rs @@ -28,7 +28,7 @@ impl Date { fn update(&mut self) { self.pos = 0; - write!(self, "{}", httpdate::fmt_http_date(SystemTime::now())).unwrap(); + write!(self, "{}", httpdate::HttpDate::from(SystemTime::now())).unwrap(); } } diff --git a/actix-http/src/encoding/encoder.rs b/actix-http/src/encoding/encoder.rs index e084aa564..180927ac6 100644 --- a/actix-http/src/encoding/encoder.rs +++ b/actix-http/src/encoding/encoder.rs @@ -50,10 +50,21 @@ impl Encoder { } } + fn empty() -> Self { + Encoder { + body: EncoderBody::Full { body: Bytes::new() }, + encoder: None, + fut: None, + eof: true, + } + } + pub fn response(encoding: ContentEncoding, head: &mut ResponseHead, body: B) -> Self { - // no need to compress an empty body - if matches!(body.size(), BodySize::None | BodySize::Sized(0)) { - return Self::none(); + // no need to compress empty bodies + match body.size() { + BodySize::None => return Self::none(), + BodySize::Sized(0) => return Self::empty(), + _ => {} } let should_encode = !(head.headers().contains_key(&CONTENT_ENCODING) diff --git a/actix-http/src/error.rs b/actix-http/src/error.rs index fbd2eb7ae..6f332118e 100644 --- a/actix-http/src/error.rs +++ b/actix-http/src/error.rs @@ -3,7 +3,7 @@ use std::{error::Error as StdError, fmt, io, str::Utf8Error, string::FromUtf8Error}; use derive_more::{Display, Error, From}; -pub use http::Error as HttpError; +pub use http::{status::InvalidStatusCode, Error as HttpError}; use http::{uri::InvalidUri, StatusCode}; use crate::{body::BoxBody, Response}; @@ -399,9 +399,7 @@ pub enum ContentTypeError { #[cfg(test)] mod tests { - use std::io; - - use http::{Error as HttpError, StatusCode}; + use http::Error as HttpError; use super::*; diff 
--git a/actix-http/src/h1/codec.rs b/actix-http/src/h1/codec.rs index 8dae2e43e..2b452f8f8 100644 --- a/actix-http/src/h1/codec.rs +++ b/actix-http/src/h1/codec.rs @@ -198,9 +198,6 @@ impl Encoder, BodySize)>> for Codec { #[cfg(test)] mod tests { - use bytes::BytesMut; - use http::Method; - use super::*; use crate::HttpMessage as _; diff --git a/actix-http/src/h1/decoder.rs b/actix-http/src/h1/decoder.rs index 5c26515f7..af64e8802 100644 --- a/actix-http/src/h1/decoder.rs +++ b/actix-http/src/h1/decoder.rs @@ -563,15 +563,8 @@ impl Decoder for PayloadDecoder { #[cfg(test)] mod tests { - use bytes::{Bytes, BytesMut}; - use http::{Method, Version}; - use super::*; - use crate::{ - error::ParseError, - header::{HeaderName, SET_COOKIE}, - HttpMessage as _, - }; + use crate::{header::SET_COOKIE, HttpMessage as _}; impl PayloadType { pub(crate) fn unwrap(self) -> PayloadDecoder { diff --git a/actix-http/src/h1/dispatcher.rs b/actix-http/src/h1/dispatcher.rs index 270707807..00b51360e 100644 --- a/actix-http/src/h1/dispatcher.rs +++ b/actix-http/src/h1/dispatcher.rs @@ -512,8 +512,10 @@ where } Poll::Ready(Some(Err(err))) => { + let err = err.into(); + tracing::error!("Response payload stream error: {err:?}"); this.flags.insert(Flags::FINISHED); - return Err(DispatchError::Body(err.into())); + return Err(DispatchError::Body(err)); } Poll::Pending => return Ok(PollResponse::DoNothing), @@ -549,6 +551,7 @@ where } Poll::Ready(Some(Err(err))) => { + tracing::error!("Response payload stream error: {err:?}"); this.flags.insert(Flags::FINISHED); return Err(DispatchError::Body( Error::new_body().with_cause(err).into(), @@ -703,7 +706,7 @@ where req.head_mut().peer_addr = *this.peer_addr; - req.conn_data = this.conn_data.as_ref().map(Rc::clone); + req.conn_data.clone_from(this.conn_data); match this.codec.message_type() { // request has no payload diff --git a/actix-http/src/h1/service.rs b/actix-http/src/h1/service.rs index 3b27e3db5..4fbccf844 100644 --- a/actix-http/src/h1/service.rs +++ b/actix-http/src/h1/service.rs @@ -153,7 +153,7 @@ mod openssl { } #[cfg(feature = "rustls-0_20")] -mod rustls_020 { +mod rustls_0_20 { use std::io; use actix_service::ServiceFactoryExt as _; @@ -214,7 +214,7 @@ mod rustls_020 { } #[cfg(feature = "rustls-0_21")] -mod rustls_021 { +mod rustls_0_21 { use std::io; use actix_service::ServiceFactoryExt as _; @@ -274,6 +274,128 @@ mod rustls_021 { } } +#[cfg(feature = "rustls-0_22")] +mod rustls_0_22 { + use std::io; + + use actix_service::ServiceFactoryExt as _; + use actix_tls::accept::{ + rustls_0_22::{reexports::ServerConfig, Acceptor, TlsStream}, + TlsError, + }; + + use super::*; + + impl H1Service, S, B, X, U> + where + S: ServiceFactory, + S::Future: 'static, + S::Error: Into>, + S::InitError: fmt::Debug, + S::Response: Into>, + + B: MessageBody, + + X: ServiceFactory, + X::Future: 'static, + X::Error: Into>, + X::InitError: fmt::Debug, + + U: ServiceFactory< + (Request, Framed, Codec>), + Config = (), + Response = (), + >, + U::Future: 'static, + U::Error: fmt::Display + Into>, + U::InitError: fmt::Debug, + { + /// Create Rustls v0.22 based service. 
+ pub fn rustls_0_22( + self, + config: ServerConfig, + ) -> impl ServiceFactory< + TcpStream, + Config = (), + Response = (), + Error = TlsError, + InitError = (), + > { + Acceptor::new(config) + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .map(|io: TlsStream| { + let peer_addr = io.get_ref().0.peer_addr().ok(); + (io, peer_addr) + }) + .and_then(self.map_err(TlsError::Service)) + } + } +} + +#[cfg(feature = "rustls-0_23")] +mod rustls_0_23 { + use std::io; + + use actix_service::ServiceFactoryExt as _; + use actix_tls::accept::{ + rustls_0_23::{reexports::ServerConfig, Acceptor, TlsStream}, + TlsError, + }; + + use super::*; + + impl H1Service, S, B, X, U> + where + S: ServiceFactory, + S::Future: 'static, + S::Error: Into>, + S::InitError: fmt::Debug, + S::Response: Into>, + + B: MessageBody, + + X: ServiceFactory, + X::Future: 'static, + X::Error: Into>, + X::InitError: fmt::Debug, + + U: ServiceFactory< + (Request, Framed, Codec>), + Config = (), + Response = (), + >, + U::Future: 'static, + U::Error: fmt::Display + Into>, + U::InitError: fmt::Debug, + { + /// Create Rustls v0.23 based service. + pub fn rustls_0_23( + self, + config: ServerConfig, + ) -> impl ServiceFactory< + TcpStream, + Config = (), + Response = (), + Error = TlsError, + InitError = (), + > { + Acceptor::new(config) + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .map(|io: TlsStream| { + let peer_addr = io.get_ref().0.peer_addr().ok(); + (io, peer_addr) + }) + .and_then(self.map_err(TlsError::Service)) + } + } +} + impl H1Service where S: ServiceFactory, @@ -419,6 +541,6 @@ where fn call(&self, (io, addr): (T, Option)) -> Self::Future { let conn_data = OnConnectData::from_io(&io, self.on_connect_ext.as_deref()); - Dispatcher::new(io, self.flow.clone(), self.cfg.clone(), addr, conn_data) + Dispatcher::new(io, Rc::clone(&self.flow), self.cfg.clone(), addr, conn_data) } } diff --git a/actix-http/src/h2/dispatcher.rs b/actix-http/src/h2/dispatcher.rs index 3e618820e..400476c88 100644 --- a/actix-http/src/h2/dispatcher.rs +++ b/actix-http/src/h2/dispatcher.rs @@ -4,7 +4,7 @@ use std::{ future::Future, marker::PhantomData, net, - pin::Pin, + pin::{pin, Pin}, rc::Rc, task::{Context, Poll}, }; @@ -20,7 +20,6 @@ use h2::{ Ping, PingPong, }; use pin_project_lite::pin_project; -use tracing::{error, trace, warn}; use crate::{ body::{BodySize, BoxBody, MessageBody}, @@ -127,7 +126,7 @@ where head.headers = parts.headers.into(); head.peer_addr = this.peer_addr; - req.conn_data = this.conn_data.as_ref().map(Rc::clone); + req.conn_data.clone_from(&this.conn_data); let fut = this.flow.service.call(req); let config = this.config.clone(); @@ -147,11 +146,13 @@ where if let Err(err) = res { match err { DispatchError::SendResponse(err) => { - trace!("Error sending HTTP/2 response: {:?}", err) + tracing::trace!("Error sending response: {err:?}"); + } + DispatchError::SendData(err) => { + tracing::warn!("Send data error: {err:?}"); } - DispatchError::SendData(err) => warn!("{:?}", err), DispatchError::ResponseBody(err) => { - error!("Response payload stream error: {:?}", err) + tracing::error!("Response payload stream error: {err:?}"); } } } @@ -228,9 +229,9 @@ where return Ok(()); } - // poll response body and send chunks to client - actix_rt::pin!(body); + let mut body = pin!(body); + // poll response body and send chunks to client while let Some(res) = 
poll_fn(|cx| body.as_mut().poll_next(cx)).await { let mut chunk = res.map_err(|err| DispatchError::ResponseBody(err.into()))?; diff --git a/actix-http/src/h2/service.rs b/actix-http/src/h2/service.rs index 0ae7ea682..debc73e59 100644 --- a/actix-http/src/h2/service.rs +++ b/actix-http/src/h2/service.rs @@ -141,7 +141,7 @@ mod openssl { } #[cfg(feature = "rustls-0_20")] -mod rustls_020 { +mod rustls_0_20 { use std::io; use actix_service::ServiceFactoryExt as _; @@ -192,7 +192,7 @@ mod rustls_020 { } #[cfg(feature = "rustls-0_21")] -mod rustls_021 { +mod rustls_0_21 { use std::io; use actix_service::ServiceFactoryExt as _; @@ -242,6 +242,108 @@ mod rustls_021 { } } +#[cfg(feature = "rustls-0_22")] +mod rustls_0_22 { + use std::io; + + use actix_service::ServiceFactoryExt as _; + use actix_tls::accept::{ + rustls_0_22::{reexports::ServerConfig, Acceptor, TlsStream}, + TlsError, + }; + + use super::*; + + impl H2Service, S, B> + where + S: ServiceFactory, + S::Future: 'static, + S::Error: Into> + 'static, + S::Response: Into> + 'static, + >::Future: 'static, + + B: MessageBody + 'static, + { + /// Create Rustls v0.22 based service. + pub fn rustls_0_22( + self, + mut config: ServerConfig, + ) -> impl ServiceFactory< + TcpStream, + Config = (), + Response = (), + Error = TlsError, + InitError = S::InitError, + > { + let mut protos = vec![b"h2".to_vec()]; + protos.extend_from_slice(&config.alpn_protocols); + config.alpn_protocols = protos; + + Acceptor::new(config) + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .map(|io: TlsStream| { + let peer_addr = io.get_ref().0.peer_addr().ok(); + (io, peer_addr) + }) + .and_then(self.map_err(TlsError::Service)) + } + } +} + +#[cfg(feature = "rustls-0_23")] +mod rustls_0_23 { + use std::io; + + use actix_service::ServiceFactoryExt as _; + use actix_tls::accept::{ + rustls_0_23::{reexports::ServerConfig, Acceptor, TlsStream}, + TlsError, + }; + + use super::*; + + impl H2Service, S, B> + where + S: ServiceFactory, + S::Future: 'static, + S::Error: Into> + 'static, + S::Response: Into> + 'static, + >::Future: 'static, + + B: MessageBody + 'static, + { + /// Create Rustls v0.23 based service. + pub fn rustls_0_23( + self, + mut config: ServerConfig, + ) -> impl ServiceFactory< + TcpStream, + Config = (), + Response = (), + Error = TlsError, + InitError = S::InitError, + > { + let mut protos = vec![b"h2".to_vec()]; + protos.extend_from_slice(&config.alpn_protocols); + config.alpn_protocols = protos; + + Acceptor::new(config) + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .map(|io: TlsStream| { + let peer_addr = io.get_ref().0.peer_addr().ok(); + (io, peer_addr) + }) + .and_then(self.map_err(TlsError::Service)) + } + } +} + impl ServiceFactory<(T, Option)> for H2Service where T: AsyncRead + AsyncWrite + Unpin + 'static, @@ -332,7 +434,7 @@ where H2ServiceHandlerResponse { state: State::Handshake( - Some(self.flow.clone()), + Some(Rc::clone(&self.flow)), Some(self.cfg.clone()), addr, on_connect_data, diff --git a/actix-http/src/header/map.rs b/actix-http/src/header/map.rs index d8a63b573..6da01d2c0 100644 --- a/actix-http/src/header/map.rs +++ b/actix-http/src/header/map.rs @@ -13,8 +13,9 @@ use super::AsHeaderName; /// `HeaderMap` is a "multi-map" of [`HeaderName`] to one or more [`HeaderValue`]s. 
/// /// # Examples +/// /// ``` -/// use actix_http::header::{self, HeaderMap, HeaderValue}; +/// # use actix_http::header::{self, HeaderMap, HeaderValue}; /// /// let mut map = HeaderMap::new(); /// @@ -29,6 +30,21 @@ use super::AsHeaderName; /// /// assert!(!map.contains_key(header::ORIGIN)); /// ``` +/// +/// Construct a header map using the [`FromIterator`] implementation. Note that it uses the append +/// strategy, so duplicate header names are preserved. +/// +/// ``` +/// use actix_http::header::{self, HeaderMap, HeaderValue}; +/// +/// let headers = HeaderMap::from_iter([ +/// (header::CONTENT_TYPE, HeaderValue::from_static("text/plain")), +/// (header::COOKIE, HeaderValue::from_static("foo=1")), +/// (header::COOKIE, HeaderValue::from_static("bar=1")), +/// ]); +/// +/// assert_eq!(headers.len(), 3); +/// ``` #[derive(Debug, Clone, Default)] pub struct HeaderMap { pub(crate) inner: AHashMap, @@ -368,8 +384,8 @@ impl HeaderMap { /// let removed = map.insert(header::ACCEPT, HeaderValue::from_static("text/html")); /// assert!(!removed.is_empty()); /// ``` - pub fn insert(&mut self, key: HeaderName, val: HeaderValue) -> Removed { - let value = self.inner.insert(key, Value::one(val)); + pub fn insert(&mut self, name: HeaderName, val: HeaderValue) -> Removed { + let value = self.inner.insert(name, Value::one(val)); Removed::new(value) } @@ -636,6 +652,16 @@ impl<'a> IntoIterator for &'a HeaderMap { } } +impl FromIterator<(HeaderName, HeaderValue)> for HeaderMap { + fn from_iter>(iter: T) -> Self { + iter.into_iter() + .fold(Self::new(), |mut map, (name, value)| { + map.append(name, value); + map + }) + } +} + /// Convert a `http::HeaderMap` to our `HeaderMap`. impl From for HeaderMap { fn from(mut map: http::HeaderMap) -> Self { @@ -650,6 +676,13 @@ impl From for http::HeaderMap { } } +/// Convert our `&HeaderMap` to a `http::HeaderMap`. +impl From<&HeaderMap> for http::HeaderMap { + fn from(map: &HeaderMap) -> Self { + map.to_owned().into() + } +} + /// Iterator over removed, owned values with the same associated name. /// /// Returned from methods that remove or replace items. 
See [`HeaderMap::insert`] diff --git a/actix-http/src/header/shared/http_date.rs b/actix-http/src/header/shared/http_date.rs index 21ed49f0c..bdfbc7051 100644 --- a/actix-http/src/header/shared/http_date.rs +++ b/actix-http/src/header/shared/http_date.rs @@ -24,8 +24,7 @@ impl FromStr for HttpDate { impl fmt::Display for HttpDate { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - let date_str = httpdate::fmt_http_date(self.0); - f.write_str(&date_str) + httpdate::HttpDate::from(self.0).fmt(f) } } @@ -37,7 +36,7 @@ impl TryIntoHeaderValue for HttpDate { let mut wrt = MutWriter(&mut buf); // unwrap: date output is known to be well formed and of known length - write!(wrt, "{}", httpdate::fmt_http_date(self.0)).unwrap(); + write!(wrt, "{}", self).unwrap(); HeaderValue::from_maybe_shared(buf.split().freeze()) } diff --git a/actix-http/src/header/utils.rs b/actix-http/src/header/utils.rs index f4f34d347..caaab3b1e 100644 --- a/actix-http/src/header/utils.rs +++ b/actix-http/src/header/utils.rs @@ -80,18 +80,18 @@ mod tests { #[test] fn comma_delimited_parsing() { - let headers = vec![]; + let headers = []; let res: Vec = from_comma_delimited(headers.iter()).unwrap(); assert_eq!(res, vec![0; 0]); - let headers = vec![ + let headers = [ HeaderValue::from_static("1, 2"), HeaderValue::from_static("3,4"), ]; let res: Vec = from_comma_delimited(headers.iter()).unwrap(); assert_eq!(res, vec![1, 2, 3, 4]); - let headers = vec![ + let headers = [ HeaderValue::from_static(""), HeaderValue::from_static(","), HeaderValue::from_static(" "), diff --git a/actix-http/src/lib.rs b/actix-http/src/lib.rs index 888c3e06f..734e6e1e1 100644 --- a/actix-http/src/lib.rs +++ b/actix-http/src/lib.rs @@ -6,7 +6,10 @@ //! | ------------------- | ------------------------------------------- | //! | `http2` | HTTP/2 support via [h2]. | //! | `openssl` | TLS support via [OpenSSL]. | -//! | `rustls` | TLS support via [rustls]. | +//! | `rustls-0_20` | TLS support via rustls 0.20. | +//! | `rustls-0_21` | TLS support via rustls 0.21. | +//! | `rustls-0_22` | TLS support via rustls 0.22. | +//! | `rustls-0_23` | TLS support via [rustls] 0.23. | //! | `compress-brotli` | Payload compression support: Brotli. | //! | `compress-gzip` | Payload compression support: Deflate, Gzip. | //! | `compress-zstd` | Payload compression support: Zstd. | @@ -17,8 +20,6 @@ //! [rustls]: https://crates.io/crates/rustls //! 
[trust-dns]: https://crates.io/crates/trust-dns -#![deny(rust_2018_idioms, nonstandard_style)] -#![warn(future_incompatible)] #![allow( clippy::type_complexity, clippy::too_many_arguments, @@ -28,7 +29,7 @@ #![doc(html_favicon_url = "https://actix.rs/favicon.ico")] #![cfg_attr(docsrs, feature(doc_auto_cfg))] -pub use ::http::{uri, uri::Uri, Method, StatusCode, Version}; +pub use http::{uri, uri::Uri, Method, StatusCode, Version}; pub mod body; mod builder; @@ -58,7 +59,7 @@ pub mod ws; #[allow(deprecated)] pub use self::payload::PayloadStream; -#[cfg(any(feature = "openssl", feature = "rustls-0_20", feature = "rustls-0_21"))] +#[cfg(feature = "__tls")] pub use self::service::TlsAcceptorConfig; pub use self::{ builder::HttpServiceBuilder, diff --git a/actix-http/src/message.rs b/actix-http/src/message.rs index 47b128fd0..d2241b229 100644 --- a/actix-http/src/message.rs +++ b/actix-http/src/message.rs @@ -66,7 +66,7 @@ impl ops::DerefMut for Message { impl Drop for Message { fn drop(&mut self) { - T::with_pool(|p| p.release(self.head.clone())) + T::with_pool(|p| p.release(Rc::clone(&self.head))) } } diff --git a/actix-http/src/notify_on_drop.rs b/actix-http/src/notify_on_drop.rs index 98544bb5d..95904b28e 100644 --- a/actix-http/src/notify_on_drop.rs +++ b/actix-http/src/notify_on_drop.rs @@ -5,7 +5,7 @@ use std::cell::RefCell; thread_local! { - static NOTIFY_DROPPED: RefCell> = RefCell::new(None); + static NOTIFY_DROPPED: RefCell> = const { RefCell::new(None) }; } /// Check if the spawned task is dropped. diff --git a/actix-http/src/requests/head.rs b/actix-http/src/requests/head.rs index 4558801f3..9ceb2a20c 100644 --- a/actix-http/src/requests/head.rs +++ b/actix-http/src/requests/head.rs @@ -16,7 +16,10 @@ pub struct RequestHead { pub uri: Uri, pub version: Version, pub headers: HeaderMap, + + /// Will only be None when called in unit tests unless set manually. pub peer_addr: Option, + flags: Flags, } diff --git a/actix-http/src/requests/request.rs b/actix-http/src/requests/request.rs index 1750fb2f7..6a267a7a6 100644 --- a/actix-http/src/requests/request.rs +++ b/actix-http/src/requests/request.rs @@ -173,7 +173,7 @@ impl
<P> Request<P>
{ /// Peer address is the directly connected peer's socket address. If a proxy is used in front of /// the Actix Web server, then it would be address of this proxy. /// - /// Will only return None when called in unit tests. + /// Will only return None when called in unit tests unless set manually. #[inline] pub fn peer_addr(&self) -> Option { self.head().peer_addr diff --git a/actix-http/src/responses/builder.rs b/actix-http/src/responses/builder.rs index 91c69ba54..bb7d0f712 100644 --- a/actix-http/src/responses/builder.rs +++ b/actix-http/src/responses/builder.rs @@ -351,12 +351,9 @@ mod tests { assert_eq!(resp.headers().get(CONTENT_TYPE).unwrap(), "text/plain"); let resp = Response::build(StatusCode::OK) - .content_type(mime::APPLICATION_JAVASCRIPT_UTF_8) + .content_type(mime::TEXT_JAVASCRIPT) .body(Bytes::new()); - assert_eq!( - resp.headers().get(CONTENT_TYPE).unwrap(), - "application/javascript; charset=utf-8" - ); + assert_eq!(resp.headers().get(CONTENT_TYPE).unwrap(), "text/javascript"); } #[test] diff --git a/actix-http/src/service.rs b/actix-http/src/service.rs index fb38ba636..3ea88274a 100644 --- a/actix-http/src/service.rs +++ b/actix-http/src/service.rs @@ -241,13 +241,13 @@ where } /// Configuration options used when accepting TLS connection. -#[cfg(any(feature = "openssl", feature = "rustls-0_20", feature = "rustls-0_21"))] +#[cfg(feature = "__tls")] #[derive(Debug, Default)] pub struct TlsAcceptorConfig { pub(crate) handshake_timeout: Option, } -#[cfg(any(feature = "openssl", feature = "rustls-0_20", feature = "rustls-0_21"))] +#[cfg(feature = "__tls")] impl TlsAcceptorConfig { /// Set TLS handshake timeout duration. pub fn handshake_timeout(self, dur: std::time::Duration) -> Self { @@ -353,12 +353,12 @@ mod openssl { } #[cfg(feature = "rustls-0_20")] -mod rustls_020 { +mod rustls_0_20 { use std::io; use actix_service::ServiceFactoryExt as _; use actix_tls::accept::{ - rustls::{reexports::ServerConfig, Acceptor, TlsStream}, + rustls_0_20::{reexports::ServerConfig, Acceptor, TlsStream}, TlsError, }; @@ -389,7 +389,7 @@ mod rustls_020 { U::Error: fmt::Display + Into>, U::InitError: fmt::Debug, { - /// Create Rustls based service. + /// Create Rustls v0.20 based service. pub fn rustls( self, config: ServerConfig, @@ -403,7 +403,7 @@ mod rustls_020 { self.rustls_with_config(config, TlsAcceptorConfig::default()) } - /// Create Rustls based service with custom TLS acceptor configuration. + /// Create Rustls v0.20 based service with custom TLS acceptor configuration. pub fn rustls_with_config( self, mut config: ServerConfig, @@ -449,7 +449,7 @@ mod rustls_020 { } #[cfg(feature = "rustls-0_21")] -mod rustls_021 { +mod rustls_0_21 { use std::io; use actix_service::ServiceFactoryExt as _; @@ -485,7 +485,7 @@ mod rustls_021 { U::Error: fmt::Display + Into>, U::InitError: fmt::Debug, { - /// Create Rustls based service. + /// Create Rustls v0.21 based service. pub fn rustls_021( self, config: ServerConfig, @@ -499,7 +499,7 @@ mod rustls_021 { self.rustls_021_with_config(config, TlsAcceptorConfig::default()) } - /// Create Rustls based service with custom TLS acceptor configuration. + /// Create Rustls v0.21 based service with custom TLS acceptor configuration. 
pub fn rustls_021_with_config( self, mut config: ServerConfig, @@ -544,6 +544,198 @@ mod rustls_021 { } } +#[cfg(feature = "rustls-0_22")] +mod rustls_0_22 { + use std::io; + + use actix_service::ServiceFactoryExt as _; + use actix_tls::accept::{ + rustls_0_22::{reexports::ServerConfig, Acceptor, TlsStream}, + TlsError, + }; + + use super::*; + + impl HttpService, S, B, X, U> + where + S: ServiceFactory, + S::Future: 'static, + S::Error: Into> + 'static, + S::InitError: fmt::Debug, + S::Response: Into> + 'static, + >::Future: 'static, + + B: MessageBody + 'static, + + X: ServiceFactory, + X::Future: 'static, + X::Error: Into>, + X::InitError: fmt::Debug, + + U: ServiceFactory< + (Request, Framed, h1::Codec>), + Config = (), + Response = (), + >, + U::Future: 'static, + U::Error: fmt::Display + Into>, + U::InitError: fmt::Debug, + { + /// Create Rustls v0.22 based service. + pub fn rustls_0_22( + self, + config: ServerConfig, + ) -> impl ServiceFactory< + TcpStream, + Config = (), + Response = (), + Error = TlsError, + InitError = (), + > { + self.rustls_0_22_with_config(config, TlsAcceptorConfig::default()) + } + + /// Create Rustls v0.22 based service with custom TLS acceptor configuration. + pub fn rustls_0_22_with_config( + self, + mut config: ServerConfig, + tls_acceptor_config: TlsAcceptorConfig, + ) -> impl ServiceFactory< + TcpStream, + Config = (), + Response = (), + Error = TlsError, + InitError = (), + > { + let mut protos = vec![b"h2".to_vec(), b"http/1.1".to_vec()]; + protos.extend_from_slice(&config.alpn_protocols); + config.alpn_protocols = protos; + + let mut acceptor = Acceptor::new(config); + + if let Some(handshake_timeout) = tls_acceptor_config.handshake_timeout { + acceptor.set_handshake_timeout(handshake_timeout); + } + + acceptor + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .and_then(|io: TlsStream| async { + let proto = if let Some(protos) = io.get_ref().1.alpn_protocol() { + if protos.windows(2).any(|window| window == b"h2") { + Protocol::Http2 + } else { + Protocol::Http1 + } + } else { + Protocol::Http1 + }; + let peer_addr = io.get_ref().0.peer_addr().ok(); + Ok((io, proto, peer_addr)) + }) + .and_then(self.map_err(TlsError::Service)) + } + } +} + +#[cfg(feature = "rustls-0_23")] +mod rustls_0_23 { + use std::io; + + use actix_service::ServiceFactoryExt as _; + use actix_tls::accept::{ + rustls_0_23::{reexports::ServerConfig, Acceptor, TlsStream}, + TlsError, + }; + + use super::*; + + impl HttpService, S, B, X, U> + where + S: ServiceFactory, + S::Future: 'static, + S::Error: Into> + 'static, + S::InitError: fmt::Debug, + S::Response: Into> + 'static, + >::Future: 'static, + + B: MessageBody + 'static, + + X: ServiceFactory, + X::Future: 'static, + X::Error: Into>, + X::InitError: fmt::Debug, + + U: ServiceFactory< + (Request, Framed, h1::Codec>), + Config = (), + Response = (), + >, + U::Future: 'static, + U::Error: fmt::Display + Into>, + U::InitError: fmt::Debug, + { + /// Create Rustls v0.23 based service. + pub fn rustls_0_23( + self, + config: ServerConfig, + ) -> impl ServiceFactory< + TcpStream, + Config = (), + Response = (), + Error = TlsError, + InitError = (), + > { + self.rustls_0_23_with_config(config, TlsAcceptorConfig::default()) + } + + /// Create Rustls v0.23 based service with custom TLS acceptor configuration. 
+ pub fn rustls_0_23_with_config( + self, + mut config: ServerConfig, + tls_acceptor_config: TlsAcceptorConfig, + ) -> impl ServiceFactory< + TcpStream, + Config = (), + Response = (), + Error = TlsError, + InitError = (), + > { + let mut protos = vec![b"h2".to_vec(), b"http/1.1".to_vec()]; + protos.extend_from_slice(&config.alpn_protocols); + config.alpn_protocols = protos; + + let mut acceptor = Acceptor::new(config); + + if let Some(handshake_timeout) = tls_acceptor_config.handshake_timeout { + acceptor.set_handshake_timeout(handshake_timeout); + } + + acceptor + .map_init_err(|_| { + unreachable!("TLS acceptor service factory does not error on init") + }) + .map_err(TlsError::into_service_error) + .and_then(|io: TlsStream| async { + let proto = if let Some(protos) = io.get_ref().1.alpn_protocol() { + if protos.windows(2).any(|window| window == b"h2") { + Protocol::Http2 + } else { + Protocol::Http1 + } + } else { + Protocol::Http1 + }; + let peer_addr = io.get_ref().0.peer_addr().ok(); + Ok((io, proto, peer_addr)) + }) + .and_then(self.map_err(TlsError::Service)) + } + } +} + impl ServiceFactory<(T, Protocol, Option)> for HttpService where @@ -718,7 +910,7 @@ where handshake: Some(( crate::h2::handshake_with_timeout(io, &self.cfg), self.cfg.clone(), - self.flow.clone(), + Rc::clone(&self.flow), conn_data, peer_addr, )), @@ -734,7 +926,7 @@ where state: State::H1 { dispatcher: h1::Dispatcher::new( io, - self.flow.clone(), + Rc::clone(&self.flow), self.cfg.clone(), peer_addr, conn_data, diff --git a/actix-http/src/test.rs b/actix-http/src/test.rs index 3815e64c6..dfa9a86c9 100644 --- a/actix-http/src/test.rs +++ b/actix-http/src/test.rs @@ -159,8 +159,8 @@ impl TestBuffer { #[allow(dead_code)] pub(crate) fn clone(&self) -> Self { Self { - read_buf: self.read_buf.clone(), - write_buf: self.write_buf.clone(), + read_buf: Rc::clone(&self.read_buf), + write_buf: Rc::clone(&self.write_buf), err: self.err.clone(), } } diff --git a/actix-http/src/ws/frame.rs b/actix-http/src/ws/frame.rs index c9fb0cde9..35b3f8e66 100644 --- a/actix-http/src/ws/frame.rs +++ b/actix-http/src/ws/frame.rs @@ -178,14 +178,14 @@ impl Parser { }; if payload_len < 126 { - dst.reserve(p_len + 2 + if mask { 4 } else { 0 }); + dst.reserve(p_len + 2); dst.put_slice(&[one, two | payload_len as u8]); } else if payload_len <= 65_535 { - dst.reserve(p_len + 4 + if mask { 4 } else { 0 }); + dst.reserve(p_len + 4); dst.put_slice(&[one, two | 126]); dst.put_u16(payload_len as u16); } else { - dst.reserve(p_len + 10 + if mask { 4 } else { 0 }); + dst.reserve(p_len + 10); dst.put_slice(&[one, two | 127]); dst.put_u64(payload_len as u64); }; diff --git a/actix-http/src/ws/mod.rs b/actix-http/src/ws/mod.rs index 87f9b38f3..3ed53b70a 100644 --- a/actix-http/src/ws/mod.rs +++ b/actix-http/src/ws/mod.rs @@ -221,7 +221,7 @@ pub fn handshake_response(req: &RequestHead) -> ResponseBuilder { #[cfg(test)] mod tests { use super::*; - use crate::{header, test::TestRequest, Method}; + use crate::{header, test::TestRequest}; #[test] fn test_handshake() { diff --git a/actix-http/src/ws/proto.rs b/actix-http/src/ws/proto.rs index 0653c00b0..27815eaf2 100644 --- a/actix-http/src/ws/proto.rs +++ b/actix-http/src/ws/proto.rs @@ -1,7 +1,4 @@ -use std::{ - convert::{From, Into}, - fmt, -}; +use std::fmt; use base64::prelude::*; use tracing::error; diff --git a/actix-http/tests/test_openssl.rs b/actix-http/tests/test_openssl.rs index cb16a4fec..4dd22b585 100644 --- a/actix-http/tests/test_openssl.rs +++ b/actix-http/tests/test_openssl.rs @@ -42,9 +42,11 
@@ where } fn tls_config() -> SslAcceptor { - let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap(); - let cert_file = cert.serialize_pem().unwrap(); - let key_file = cert.serialize_private_key_pem(); + let rcgen::CertifiedKey { cert, key_pair } = + rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap(); + let cert_file = cert.pem(); + let key_file = key_pair.serialize_pem(); + let cert = X509::from_pem(cert_file.as_bytes()).unwrap(); let key = PKey::private_key_from_pem(key_file.as_bytes()).unwrap(); diff --git a/actix-http/tests/test_rustls.rs b/actix-http/tests/test_rustls.rs index c94e579e5..3ca0d94c2 100644 --- a/actix-http/tests/test_rustls.rs +++ b/actix-http/tests/test_rustls.rs @@ -1,6 +1,6 @@ -#![cfg(feature = "rustls-0_21")] +#![cfg(feature = "rustls-0_23")] -extern crate tls_rustls_021 as rustls; +extern crate tls_rustls_023 as rustls; use std::{ convert::Infallible, @@ -20,13 +20,13 @@ use actix_http::{ use actix_http_test::test_server; use actix_rt::pin; use actix_service::{fn_factory_with_config, fn_service}; -use actix_tls::connect::rustls_0_21::webpki_roots_cert_store; +use actix_tls::connect::rustls_0_23::webpki_roots_cert_store; use actix_utils::future::{err, ok, poll_fn}; use bytes::{Bytes, BytesMut}; use derive_more::{Display, Error}; use futures_core::{ready, Stream}; use futures_util::stream::once; -use rustls::{Certificate, PrivateKey, ServerConfig as RustlsServerConfig, ServerName}; +use rustls::{pki_types::ServerName, ServerConfig as RustlsServerConfig}; use rustls_pemfile::{certs, pkcs8_private_keys}; async fn load_body(stream: S) -> Result @@ -52,24 +52,25 @@ where } fn tls_config() -> RustlsServerConfig { - let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap(); - let cert_file = cert.serialize_pem().unwrap(); - let key_file = cert.serialize_private_key_pem(); + let rcgen::CertifiedKey { cert, key_pair } = + rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap(); + let cert_file = cert.pem(); + let key_file = key_pair.serialize_pem(); let cert_file = &mut BufReader::new(cert_file.as_bytes()); let key_file = &mut BufReader::new(key_file.as_bytes()); - let cert_chain = certs(cert_file) - .unwrap() - .into_iter() - .map(Certificate) - .collect(); - let mut keys = pkcs8_private_keys(key_file).unwrap(); + let cert_chain = certs(cert_file).collect::, _>>().unwrap(); + let mut keys = pkcs8_private_keys(key_file) + .collect::, _>>() + .unwrap(); let mut config = RustlsServerConfig::builder() - .with_safe_defaults() .with_no_client_auth() - .with_single_cert(cert_chain, PrivateKey(keys.remove(0))) + .with_single_cert( + cert_chain, + rustls::pki_types::PrivateKeyDer::Pkcs8(keys.remove(0)), + ) .unwrap(); config.alpn_protocols.push(HTTP1_1_ALPN_PROTOCOL.to_vec()); @@ -83,7 +84,6 @@ pub fn get_negotiated_alpn_protocol( client_alpn_protocol: &[u8], ) -> Option> { let mut config = rustls::ClientConfig::builder() - .with_safe_defaults() .with_root_certificates(webpki_roots_cert_store()) .with_no_client_auth(); @@ -109,7 +109,7 @@ async fn h1() -> io::Result<()> { let srv = test_server(move || { HttpService::build() .h1(|_| ok::<_, Error>(Response::ok())) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) }) .await; @@ -123,7 +123,7 @@ async fn h2() -> io::Result<()> { let srv = test_server(move || { HttpService::build() .h2(|_| ok::<_, Error>(Response::ok())) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) }) .await; @@ -141,7 +141,7 @@ async fn h1_1() -> io::Result<()> 
{ assert_eq!(req.version(), Version::HTTP_11); ok::<_, Error>(Response::ok()) }) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) }) .await; @@ -159,7 +159,7 @@ async fn h2_1() -> io::Result<()> { assert_eq!(req.version(), Version::HTTP_2); ok::<_, Error>(Response::ok()) }) - .rustls_021_with_config( + .rustls_0_23_with_config( tls_config(), TlsAcceptorConfig::default().handshake_timeout(Duration::from_secs(5)), ) @@ -180,7 +180,7 @@ async fn h2_body1() -> io::Result<()> { let body = load_body(req.take_payload()).await?; Ok::<_, Error>(Response::ok().set_body(body)) }) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) }) .await; @@ -206,7 +206,7 @@ async fn h2_content_length() { ]; ok::<_, Infallible>(Response::new(statuses[indx])) }) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) }) .await; @@ -278,7 +278,7 @@ async fn h2_headers() { } ok::<_, Infallible>(config.body(data.clone())) }) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) }) .await; @@ -317,7 +317,7 @@ async fn h2_body2() { let mut srv = test_server(move || { HttpService::build() .h2(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) }) .await; @@ -334,7 +334,7 @@ async fn h2_head_empty() { let mut srv = test_server(move || { HttpService::build() .finish(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) }) .await; @@ -360,7 +360,7 @@ async fn h2_head_binary() { let mut srv = test_server(move || { HttpService::build() .h2(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) }) .await; @@ -385,7 +385,7 @@ async fn h2_head_binary2() { let srv = test_server(move || { HttpService::build() .h2(|_| ok::<_, Infallible>(Response::ok().set_body(STR))) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) }) .await; @@ -411,7 +411,7 @@ async fn h2_body_length() { Response::ok().set_body(SizedStream::new(STR.len() as u64, body)), ) }) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) }) .await; @@ -435,7 +435,7 @@ async fn h2_body_chunked_explicit() { .body(BodyStream::new(body)), ) }) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) }) .await; @@ -464,7 +464,7 @@ async fn h2_response_http_error_handling() { ) })) })) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) }) .await; @@ -494,7 +494,7 @@ async fn h2_service_error() { let mut srv = test_server(move || { HttpService::build() .h2(|_| err::, _>(BadRequest)) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) }) .await; @@ -511,7 +511,7 @@ async fn h1_service_error() { let mut srv = test_server(move || { HttpService::build() .h1(|_| err::, _>(BadRequest)) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) }) .await; @@ -534,7 +534,7 @@ async fn alpn_h1() -> io::Result<()> { config.alpn_protocols.push(CUSTOM_ALPN_PROTOCOL.to_vec()); HttpService::build() .h1(|_| ok::<_, Error>(Response::ok())) - .rustls_021(config) + .rustls_0_23(config) }) .await; @@ -556,7 +556,7 @@ async fn alpn_h2() -> io::Result<()> { config.alpn_protocols.push(CUSTOM_ALPN_PROTOCOL.to_vec()); HttpService::build() .h2(|_| ok::<_, Error>(Response::ok())) - .rustls_021(config) + .rustls_0_23(config) }) .await; @@ -582,7 +582,7 @@ async fn alpn_h2_1() -> io::Result<()> { config.alpn_protocols.push(CUSTOM_ALPN_PROTOCOL.to_vec()); HttpService::build() .finish(|_| ok::<_, Error>(Response::ok())) - .rustls_021(config) + .rustls_0_23(config) }) .await; diff --git 
a/actix-multipart-derive/CHANGES.md b/actix-multipart-derive/CHANGES.md index e36a13d04..d0c759297 100644 --- a/actix-multipart-derive/CHANGES.md +++ b/actix-multipart-derive/CHANGES.md @@ -2,6 +2,10 @@ ## Unreleased +## 0.7.0 + +- Minimum supported Rust version (MSRV) is now 1.72. + ## 0.6.1 - Update `syn` dependency to `2`. diff --git a/actix-multipart-derive/Cargo.toml b/actix-multipart-derive/Cargo.toml index 2f049a3fb..964ef2b74 100644 --- a/actix-multipart-derive/Cargo.toml +++ b/actix-multipart-derive/Cargo.toml @@ -1,13 +1,14 @@ [package] name = "actix-multipart-derive" -version = "0.6.1" +version = "0.7.0" authors = ["Jacob Halsey "] description = "Multipart form derive macro for Actix Web" keywords = ["http", "web", "framework", "async", "futures"] -homepage = "https://actix.rs" -repository = "https://github.com/actix/actix-web" -license = "MIT OR Apache-2.0" -edition = "2021" +homepage.workspace = true +repository.workspace = true +license.workspace = true +edition.workspace = true +rust-version.workspace = true [package.metadata.docs.rs] rustdoc-args = ["--cfg", "docsrs"] @@ -24,7 +25,10 @@ quote = "1" syn = "2" [dev-dependencies] -actix-multipart = "0.6" +actix-multipart = "0.7" actix-web = "4" rustversion = "1" trybuild = "1" + +[lints] +workspace = true diff --git a/actix-multipart-derive/README.md b/actix-multipart-derive/README.md index cd5780c56..bf75613ed 100644 --- a/actix-multipart-derive/README.md +++ b/actix-multipart-derive/README.md @@ -1,17 +1,16 @@ -# actix-multipart-derive +# `actix-multipart-derive` > The derive macro implementation for actix-multipart-derive. + + [![crates.io](https://img.shields.io/crates/v/actix-multipart-derive?label=latest)](https://crates.io/crates/actix-multipart-derive) -[![Documentation](https://docs.rs/actix-multipart-derive/badge.svg?version=0.6.1)](https://docs.rs/actix-multipart-derive/0.6.1) -![Version](https://img.shields.io/badge/rustc-1.68+-ab6000.svg) +[![Documentation](https://docs.rs/actix-multipart-derive/badge.svg?version=0.7.0)](https://docs.rs/actix-multipart-derive/0.7.0) +![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) ![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-multipart-derive.svg)
-[![dependency status](https://deps.rs/crate/actix-multipart-derive/0.6.1/status.svg)](https://deps.rs/crate/actix-multipart-derive/0.6.1) +[![dependency status](https://deps.rs/crate/actix-multipart-derive/0.7.0/status.svg)](https://deps.rs/crate/actix-multipart-derive/0.7.0) [![Download](https://img.shields.io/crates/d/actix-multipart-derive.svg)](https://crates.io/crates/actix-multipart-derive) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) -## Documentation & Resources - -- [API Documentation](https://docs.rs/actix-multipart-derive) -- Minimum Supported Rust Version (MSRV): 1.68 + diff --git a/actix-multipart-derive/src/lib.rs b/actix-multipart-derive/src/lib.rs index 9552ad2d9..6818d477c 100644 --- a/actix-multipart-derive/src/lib.rs +++ b/actix-multipart-derive/src/lib.rs @@ -2,8 +2,6 @@ //! //! See [`macro@MultipartForm`] for usage examples. -#![deny(rust_2018_idioms, nonstandard_style)] -#![warn(future_incompatible)] #![doc(html_logo_url = "https://actix.rs/img/logo.png")] #![doc(html_favicon_url = "https://actix.rs/favicon.ico")] #![cfg_attr(docsrs, feature(doc_auto_cfg))] @@ -138,7 +136,7 @@ struct ParsedField<'t> { /// `#[multipart(duplicate_field = "")]` attribute: /// /// - "ignore": (default) Extra fields are ignored. I.e., the first one is persisted. -/// - "deny": A `MultipartError::UnsupportedField` error response is returned. +/// - "deny": A `MultipartError::UnknownField` error response is returned. /// - "replace": Each field is processed, but only the last one is persisted. /// /// Note that `Vec` fields will ignore this option. @@ -229,7 +227,7 @@ pub fn impl_multipart_form(input: proc_macro::TokenStream) -> proc_macro::TokenS // Return value when a field name is not supported by the form let unknown_field_result = if attrs.deny_unknown_fields { quote!(::std::result::Result::Err( - ::actix_multipart::MultipartError::UnsupportedField(field.name().to_string()) + ::actix_multipart::MultipartError::UnknownField(field.name().unwrap().to_string()) )) } else { quote!(::std::result::Result::Ok(())) @@ -292,7 +290,7 @@ pub fn impl_multipart_form(input: proc_macro::TokenStream) -> proc_macro::TokenS limits: &'t mut ::actix_multipart::form::Limits, state: &'t mut ::actix_multipart::form::State, ) -> ::std::pin::Pin<::std::boxed::Box> + 't>> { - match field.name() { + match field.name().unwrap() { #handle_field_impl _ => return ::std::boxed::Box::pin(::std::future::ready(#unknown_field_result)), } diff --git a/actix-multipart-derive/tests/trybuild.rs b/actix-multipart-derive/tests/trybuild.rs index 88aa619c6..6b25d78df 100644 --- a/actix-multipart-derive/tests/trybuild.rs +++ b/actix-multipart-derive/tests/trybuild.rs @@ -1,4 +1,4 @@ -#[rustversion::stable(1.68)] // MSRV +#[rustversion::stable(1.72)] // MSRV #[test] fn compile_macros() { let t = trybuild::TestCases::new(); diff --git a/actix-multipart/CHANGES.md b/actix-multipart/CHANGES.md index 50faf7cfa..c3f3b6e39 100644 --- a/actix-multipart/CHANGES.md +++ b/actix-multipart/CHANGES.md @@ -2,6 +2,31 @@ ## Unreleased +## 0.7.2 + +- Fix re-exported version of `actix-multipart-derive`. + +## 0.7.1 + +- Expose `LimitExceeded` error type. + +## 0.7.0 + +- Add `MultipartError::ContentTypeIncompatible` variant. +- Add `MultipartError::ContentDispositionNameMissing` variant. +- Add `Field::bytes()` method. +- Rename `MultipartError::{NoContentDisposition => ContentDispositionMissing}` variant. 
+- Rename `MultipartError::{NoContentType => ContentTypeMissing}` variant. +- Rename `MultipartError::{ParseContentType => ContentTypeParse}` variant. +- Rename `MultipartError::{Boundary => BoundaryMissing}` variant. +- Rename `MultipartError::{UnsupportedField => UnknownField}` variant. +- Remove top-level re-exports of `test` utilities. + +## 0.6.2 + +- Add testing utilities under new module `test`. +- Minimum supported Rust version (MSRV) is now 1.72. + ## 0.6.1 - Minimum supported Rust version (MSRV) is now 1.68 due to transitive `time` dependency. diff --git a/actix-multipart/Cargo.toml b/actix-multipart/Cargo.toml index 257d56132..7a80b265f 100644 --- a/actix-multipart/Cargo.toml +++ b/actix-multipart/Cargo.toml @@ -1,32 +1,47 @@ [package] name = "actix-multipart" -version = "0.6.1" +version = "0.7.2" authors = [ "Nikolay Kim ", "Jacob Halsey ", + "Rob Ede ", ] -description = "Multipart form support for Actix Web" -keywords = ["http", "web", "framework", "async", "futures"] -homepage = "https://actix.rs" -repository = "https://github.com/actix/actix-web" -license = "MIT OR Apache-2.0" -edition = "2021" +description = "Multipart request & form support for Actix Web" +keywords = ["http", "actix", "web", "multipart", "form"] +homepage.workspace = true +repository.workspace = true +license.workspace = true +edition.workspace = true [package.metadata.docs.rs] rustdoc-args = ["--cfg", "docsrs"] all-features = true +[package.metadata.cargo_check_external_types] +allowed_external_types = [ + "actix_http::*", + "actix_multipart_derive::*", + "actix_utils::*", + "actix_web::*", + "bytes::*", + "futures_core::*", + "mime::*", + "serde_json::*", + "serde_plain::*", + "serde::*", + "tempfile::*", +] + [features] default = ["tempfile", "derive"] derive = ["actix-multipart-derive"] tempfile = ["dep:tempfile", "tokio/fs"] [dependencies] -actix-multipart-derive = { version = "=0.6.1", optional = true } +actix-multipart-derive = { version = "=0.7.0", optional = true } actix-utils = "3" actix-web = { version = "4", default-features = false } -bytes = "1" derive_more = "0.99.5" futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] } futures-util = { version = "0.3.17", default-features = false, features = ["alloc"] } @@ -35,6 +50,7 @@ local-waker = "0.1" log = "0.4" memchr = "2.5" mime = "0.3" +rand = "0.8" serde = "1" serde_json = "1" serde_plain = "1" @@ -46,7 +62,15 @@ actix-http = "3" actix-multipart-rfc7578 = "0.10" actix-rt = "2.2" actix-test = "0.1" +actix-web = "4" +assert_matches = "1" awc = "3" +env_logger = "0.11" futures-util = { version = "0.3.17", default-features = false, features = ["alloc"] } +futures-test = "0.3" +multer = "3" tokio = { version = "1.24.2", features = ["sync"] } tokio-stream = "0.1" + +[lints] +workspace = true diff --git a/actix-multipart/README.md b/actix-multipart/README.md index 8fe0328ab..ec2e94bd8 100644 --- a/actix-multipart/README.md +++ b/actix-multipart/README.md @@ -1,17 +1,74 @@ -# actix-multipart +# `actix-multipart` -> Multipart form support for Actix Web. 
+ [![crates.io](https://img.shields.io/crates/v/actix-multipart?label=latest)](https://crates.io/crates/actix-multipart) -[![Documentation](https://docs.rs/actix-multipart/badge.svg?version=0.6.1)](https://docs.rs/actix-multipart/0.6.1) -![Version](https://img.shields.io/badge/rustc-1.68+-ab6000.svg) +[![Documentation](https://docs.rs/actix-multipart/badge.svg?version=0.7.2)](https://docs.rs/actix-multipart/0.7.2) +![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) ![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-multipart.svg)
-[![dependency status](https://deps.rs/crate/actix-multipart/0.6.1/status.svg)](https://deps.rs/crate/actix-multipart/0.6.1) +[![dependency status](https://deps.rs/crate/actix-multipart/0.7.2/status.svg)](https://deps.rs/crate/actix-multipart/0.7.2) [![Download](https://img.shields.io/crates/d/actix-multipart.svg)](https://crates.io/crates/actix-multipart) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) -## Documentation & Resources + -- [API Documentation](https://docs.rs/actix-multipart) -- Minimum Supported Rust Version (MSRV): 1.68 + + +Multipart request & form support for Actix Web. + +The [`Multipart`] extractor aims to support all kinds of `multipart/*` requests, including `multipart/form-data`, `multipart/related` and `multipart/mixed`. This is a lower-level extractor which supports reading [multipart fields](Field), in the order they are sent by the client. + +Due to additional requirements for `multipart/form-data` requests, the higher level [`MultipartForm`] extractor and derive macro only supports this media type. + +## Examples + +```rust +use actix_web::{post, App, HttpServer, Responder}; + +use actix_multipart::form::{json::Json as MpJson, tempfile::TempFile, MultipartForm}; +use serde::Deserialize; + +#[derive(Debug, Deserialize)] +struct Metadata { + name: String, +} + +#[derive(Debug, MultipartForm)] +struct UploadForm { + #[multipart(limit = "100MB")] + file: TempFile, + json: MpJson, +} + +#[post("/videos")] +pub async fn post_video(MultipartForm(form): MultipartForm) -> impl Responder { + format!( + "Uploaded file {}, with size: {}", + form.json.name, form.file.size + ) +} + +#[actix_web::main] +async fn main() -> std::io::Result<()> { + HttpServer::new(move || App::new().service(post_video)) + .bind(("127.0.0.1", 8080))? + .run() + .await +} +``` + +cURL request: + +```sh +curl -v --request POST \ + --url http://localhost:8080/videos \ + -F 'json={"name": "Cargo.lock"};type=application/json' \ + -F file=@./Cargo.lock +``` + +[`MultipartForm`]: struct@form::MultipartForm + + + +[More available in the examples repo →](https://github.com/actix/examples/tree/master/forms/multipart) diff --git a/actix-multipart/examples/form.rs b/actix-multipart/examples/form.rs new file mode 100644 index 000000000..a90aeff96 --- /dev/null +++ b/actix-multipart/examples/form.rs @@ -0,0 +1,36 @@ +use actix_multipart::form::{json::Json as MpJson, tempfile::TempFile, MultipartForm}; +use actix_web::{middleware::Logger, post, App, HttpServer, Responder}; +use serde::Deserialize; + +#[derive(Debug, Deserialize)] +struct Metadata { + name: String, +} + +#[derive(Debug, MultipartForm)] +struct UploadForm { + #[multipart(limit = "100MB")] + file: TempFile, + json: MpJson, +} + +#[post("/videos")] +async fn post_video(MultipartForm(form): MultipartForm) -> impl Responder { + format!( + "Uploaded file {}, with size: {}\ntemporary file ({}) was deleted\n", + form.json.name, + form.file.size, + form.file.file.path().display(), + ) +} + +#[actix_web::main] +async fn main() -> std::io::Result<()> { + env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); + + HttpServer::new(move || App::new().service(post_video).wrap(Logger::default())) + .workers(2) + .bind(("127.0.0.1", 8080))? 
+ .run() + .await +} diff --git a/actix-multipart/src/error.rs b/actix-multipart/src/error.rs index 77b5a559f..cdb608738 100644 --- a/actix-multipart/src/error.rs +++ b/actix-multipart/src/error.rs @@ -10,78 +10,96 @@ use derive_more::{Display, Error, From}; /// A set of errors that can occur during parsing multipart streams. #[derive(Debug, Display, From, Error)] #[non_exhaustive] -pub enum MultipartError { - /// Content-Disposition header is not found or is not equal to "form-data". +pub enum Error { + /// Could not find Content-Type header. + #[display(fmt = "Could not find Content-Type header")] + ContentTypeMissing, + + /// Could not parse Content-Type header. + #[display(fmt = "Could not parse Content-Type header")] + ContentTypeParse, + + /// Parsed Content-Type did not have "multipart" top-level media type. /// - /// According to [RFC 7578 §4.2](https://datatracker.ietf.org/doc/html/rfc7578#section-4.2) a - /// Content-Disposition header must always be present and equal to "form-data". - #[display(fmt = "No Content-Disposition `form-data` header")] - NoContentDisposition, + /// Also raised when extracting a [`MultipartForm`] from a request that does not have the + /// "multipart/form-data" media type. + /// + /// [`MultipartForm`]: struct@crate::form::MultipartForm + #[display(fmt = "Parsed Content-Type did not have "multipart" top-level media type")] + ContentTypeIncompatible, - /// Content-Type header is not found - #[display(fmt = "No Content-Type header found")] - NoContentType, - - /// Can not parse Content-Type header - #[display(fmt = "Can not parse Content-Type header")] - ParseContentType, - - /// Multipart boundary is not found + /// Multipart boundary is not found. #[display(fmt = "Multipart boundary is not found")] - Boundary, + BoundaryMissing, - /// Nested multipart is not supported + /// Content-Disposition header was not found or not of disposition type "form-data" when parsing + /// a "form-data" field. + /// + /// As per [RFC 7578 §4.2], a "multipart/form-data" field's Content-Disposition header must + /// always be present and have a disposition type of "form-data". + /// + /// [RFC 7578 §4.2]: https://datatracker.ietf.org/doc/html/rfc7578#section-4.2 + #[display(fmt = "Content-Disposition header was not found when parsing a \"form-data\" field")] + ContentDispositionMissing, + + /// Content-Disposition name parameter was not found when parsing a "form-data" field. + /// + /// As per [RFC 7578 §4.2], a "multipart/form-data" field's Content-Disposition header must + /// always include a "name" parameter. + /// + /// [RFC 7578 §4.2]: https://datatracker.ietf.org/doc/html/rfc7578#section-4.2 + #[display(fmt = "Content-Disposition header was not found when parsing a \"form-data\" field")] + ContentDispositionNameMissing, + + /// Nested multipart is not supported. #[display(fmt = "Nested multipart is not supported")] Nested, - /// Multipart stream is incomplete + /// Multipart stream is incomplete. #[display(fmt = "Multipart stream is incomplete")] Incomplete, - /// Error during field parsing - #[display(fmt = "{}", _0)] + /// Field parsing failed. + #[display(fmt = "Error during field parsing")] Parse(ParseError), - /// Payload error - #[display(fmt = "{}", _0)] + /// HTTP payload error. + #[display(fmt = "Payload error")] Payload(PayloadError), - /// Not consumed - #[display(fmt = "Multipart stream is not consumed")] + /// Stream is not consumed. 
+ #[display(fmt = "Stream is not consumed")] NotConsumed, - /// An error from a field handler in a form - #[display( - fmt = "An error occurred processing field `{}`: {}", - field_name, - source - )] + /// Form field handler raised error. + #[display(fmt = "An error occurred processing field: {name}")] Field { - field_name: String, + name: String, source: actix_web::Error, }, - /// Duplicate field - #[display(fmt = "Duplicate field found for: `{}`", _0)] + /// Duplicate field found (for structure that opted-in to denying duplicate fields). + #[display(fmt = "Duplicate field found: {_0}")] #[from(ignore)] DuplicateField(#[error(not(source))] String), - /// Missing field - #[display(fmt = "Field with name `{}` is required", _0)] + /// Required field is missing. + #[display(fmt = "Required field is missing: {_0}")] #[from(ignore)] MissingField(#[error(not(source))] String), - /// Unknown field - #[display(fmt = "Unsupported field `{}`", _0)] + /// Unknown field (for structure that opted-in to denying unknown fields). + #[display(fmt = "Unknown field: {_0}")] #[from(ignore)] - UnsupportedField(#[error(not(source))] String), + UnknownField(#[error(not(source))] String), } -/// Return `BadRequest` for `MultipartError` -impl ResponseError for MultipartError { +/// Return `BadRequest` for `MultipartError`. +impl ResponseError for Error { fn status_code(&self) -> StatusCode { match &self { - MultipartError::Field { source, .. } => source.as_response_error().status_code(), + Error::Field { source, .. } => source.as_response_error().status_code(), + Error::ContentTypeIncompatible => StatusCode::UNSUPPORTED_MEDIA_TYPE, _ => StatusCode::BAD_REQUEST, } } @@ -93,7 +111,7 @@ mod tests { #[test] fn test_multipart_error() { - let resp = MultipartError::Boundary.error_response(); + let resp = Error::BoundaryMissing.error_response(); assert_eq!(resp.status(), StatusCode::BAD_REQUEST); } } diff --git a/actix-multipart/src/extractor.rs b/actix-multipart/src/extractor.rs index 56ed69ae4..31999228e 100644 --- a/actix-multipart/src/extractor.rs +++ b/actix-multipart/src/extractor.rs @@ -1,21 +1,20 @@ -//! Multipart payload support - use actix_utils::future::{ready, Ready}; use actix_web::{dev::Payload, Error, FromRequest, HttpRequest}; -use crate::server::Multipart; +use crate::multipart::Multipart; -/// Get request's payload as multipart stream. +/// Extract request's payload as multipart stream. 
/// -/// Content-type: multipart/form-data; +/// Content-type: multipart/*; /// /// # Examples +/// /// ``` -/// use actix_web::{web, HttpResponse, Error}; +/// use actix_web::{web, HttpResponse}; /// use actix_multipart::Multipart; /// use futures_util::StreamExt as _; /// -/// async fn index(mut payload: Multipart) -> Result { +/// async fn index(mut payload: Multipart) -> actix_web::Result { /// // iterate over multipart stream /// while let Some(item) = payload.next().await { /// let mut field = item?; @@ -26,7 +25,7 @@ use crate::server::Multipart; /// } /// } /// -/// Ok(HttpResponse::Ok().into()) +/// Ok(HttpResponse::Ok().finish()) /// } /// ``` impl FromRequest for Multipart { @@ -35,9 +34,6 @@ impl FromRequest for Multipart { #[inline] fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future { - ready(Ok(match Multipart::boundary(req.headers()) { - Ok(boundary) => Multipart::from_boundary(boundary, payload.take()), - Err(err) => Multipart::from_error(err), - })) + ready(Ok(Multipart::from_req(req, payload))) } } diff --git a/actix-multipart/src/field.rs b/actix-multipart/src/field.rs new file mode 100644 index 000000000..f4eb601fb --- /dev/null +++ b/actix-multipart/src/field.rs @@ -0,0 +1,501 @@ +use std::{ + cell::RefCell, + cmp, fmt, + future::poll_fn, + mem, + pin::Pin, + rc::Rc, + task::{ready, Context, Poll}, +}; + +use actix_web::{ + error::PayloadError, + http::header::{self, ContentDisposition, HeaderMap}, + web::{Bytes, BytesMut}, +}; +use derive_more::{Display, Error}; +use futures_core::Stream; +use mime::Mime; + +use crate::{ + error::Error, + payload::{PayloadBuffer, PayloadRef}, + safety::Safety, +}; + +/// Error type returned from [`Field::bytes()`] when field data is larger than limit. +#[derive(Debug, Display, Error)] +#[display(fmt = "size limit exceeded while collecting field data")] +#[non_exhaustive] +pub struct LimitExceeded; + +/// A single field in a multipart stream. +pub struct Field { + /// Field's Content-Type. + content_type: Option, + + /// Field's Content-Disposition. + content_disposition: Option, + + /// Form field name. + /// + /// A non-optional storage for form field names to avoid unwraps in `form` module. Will be an + /// empty string in non-form contexts. + /// + // INVARIANT: always non-empty when request content-type is multipart/form-data. + pub(crate) form_field_name: String, + + /// Field's header map. + headers: HeaderMap, + + safety: Safety, + inner: Rc>, +} + +impl Field { + pub(crate) fn new( + content_type: Option, + content_disposition: Option, + form_field_name: Option, + headers: HeaderMap, + safety: Safety, + inner: Rc>, + ) -> Self { + Field { + content_type, + content_disposition, + form_field_name: form_field_name.unwrap_or_default(), + headers, + inner, + safety, + } + } + + /// Returns a reference to the field's header map. + pub fn headers(&self) -> &HeaderMap { + &self.headers + } + + /// Returns a reference to the field's content (mime) type, if it is supplied by the client. + /// + /// According to [RFC 7578](https://www.rfc-editor.org/rfc/rfc7578#section-4.4), if it is not + /// present, it should default to "text/plain". Note it is the responsibility of the client to + /// provide the appropriate content type, there is no attempt to validate this by the server. + pub fn content_type(&self) -> Option<&Mime> { + self.content_type.as_ref() + } + + /// Returns this field's parsed Content-Disposition header, if set. 
+ /// + /// # Validation + /// + /// Per [RFC 7578 §4.2], the parts of a multipart/form-data payload MUST contain a + /// Content-Disposition header field where the disposition type is `form-data` and MUST also + /// contain an additional parameter of `name` with its value being the original field name from + /// the form. This requirement is enforced during extraction for multipart/form-data requests, + /// but not other kinds of multipart requests (such as multipart/related). + /// + /// As such, it is safe to `.unwrap()` calls `.content_disposition()` if you've verified. + /// + /// The [`name()`](Self::name) method is also provided as a convenience for obtaining the + /// aforementioned name parameter. + /// + /// [RFC 7578 §4.2]: https://datatracker.ietf.org/doc/html/rfc7578#section-4.2 + pub fn content_disposition(&self) -> Option<&ContentDisposition> { + self.content_disposition.as_ref() + } + + /// Returns the field's name, if set. + /// + /// See [`content_disposition()`](Self::content_disposition) regarding guarantees on presence of + /// the "name" field. + pub fn name(&self) -> Option<&str> { + self.content_disposition()?.get_name() + } + + /// Collects the raw field data, up to `limit` bytes. + /// + /// # Errors + /// + /// Any errors produced by the data stream are returned as `Ok(Err(Error))` immediately. + /// + /// If the buffered data size would exceed `limit`, an `Err(LimitExceeded)` is returned. Note + /// that, in this case, the full data stream is exhausted before returning the error so that + /// subsequent fields can still be read. To better defend against malicious/infinite requests, + /// it is advisable to also put a timeout on this call. + pub async fn bytes(&mut self, limit: usize) -> Result, LimitExceeded> { + /// Sensible default (2kB) for initial, bounded allocation when collecting body bytes. + const INITIAL_ALLOC_BYTES: usize = 2 * 1024; + + let mut exceeded_limit = false; + let mut buf = BytesMut::with_capacity(INITIAL_ALLOC_BYTES); + + let mut field = Pin::new(self); + + match poll_fn(|cx| loop { + match ready!(field.as_mut().poll_next(cx)) { + // if already over limit, discard chunk to advance multipart request + Some(Ok(_chunk)) if exceeded_limit => {} + + // if limit is exceeded set flag to true and continue + Some(Ok(chunk)) if buf.len() + chunk.len() > limit => { + exceeded_limit = true; + // eagerly de-allocate field data buffer + let _ = mem::take(&mut buf); + } + + Some(Ok(chunk)) => buf.extend_from_slice(&chunk), + + None => return Poll::Ready(Ok(())), + Some(Err(err)) => return Poll::Ready(Err(err)), + } + }) + .await + { + // propagate error returned from body poll + Err(err) => Ok(Err(err)), + + // limit was exceeded while reading body + Ok(()) if exceeded_limit => Err(LimitExceeded), + + // otherwise return body buffer + Ok(()) => Ok(Ok(buf.freeze())), + } + } +} + +impl Stream for Field { + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.get_mut(); + let mut inner = this.inner.borrow_mut(); + + if let Some(mut buffer) = inner + .payload + .as_ref() + .expect("Field should not be polled after completion") + .get_mut(&this.safety) + { + // check safety and poll read payload to buffer. 
+ buffer.poll_stream(cx)?; + } else if !this.safety.is_clean() { + // safety violation + return Poll::Ready(Some(Err(Error::NotConsumed))); + } else { + return Poll::Pending; + } + + inner.poll(&this.safety) + } +} + +impl fmt::Debug for Field { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + if let Some(ct) = &self.content_type { + writeln!(f, "\nField: {}", ct)?; + } else { + writeln!(f, "\nField:")?; + } + writeln!(f, " boundary: {}", self.inner.borrow().boundary)?; + writeln!(f, " headers:")?; + for (key, val) in self.headers.iter() { + writeln!(f, " {:?}: {:?}", key, val)?; + } + Ok(()) + } +} + +pub(crate) struct InnerField { + /// Payload is initialized as Some and is `take`n when the field stream finishes. + payload: Option, + + /// Field boundary (without "--" prefix). + boundary: String, + + /// True if request payload has been exhausted. + eof: bool, + + /// Field data's stated size according to it's Content-Length header. + length: Option, +} + +impl InnerField { + pub(crate) fn new_in_rc( + payload: PayloadRef, + boundary: String, + headers: &HeaderMap, + ) -> Result>, PayloadError> { + Self::new(payload, boundary, headers).map(|this| Rc::new(RefCell::new(this))) + } + + pub(crate) fn new( + payload: PayloadRef, + boundary: String, + headers: &HeaderMap, + ) -> Result { + let len = if let Some(len) = headers.get(&header::CONTENT_LENGTH) { + match len.to_str().ok().and_then(|len| len.parse::().ok()) { + Some(len) => Some(len), + None => return Err(PayloadError::Incomplete(None)), + } + } else { + None + }; + + Ok(InnerField { + boundary, + payload: Some(payload), + eof: false, + length: len, + }) + } + + /// Reads body part content chunk of the specified size. + /// + /// The body part must has `Content-Length` header with proper value. + pub(crate) fn read_len( + payload: &mut PayloadBuffer, + size: &mut u64, + ) -> Poll>> { + if *size == 0 { + Poll::Ready(None) + } else { + match payload.read_max(*size)? { + Some(mut chunk) => { + let len = cmp::min(chunk.len() as u64, *size); + *size -= len; + let ch = chunk.split_to(len as usize); + if !chunk.is_empty() { + payload.unprocessed(chunk); + } + Poll::Ready(Some(Ok(ch))) + } + None => { + if payload.eof && (*size != 0) { + Poll::Ready(Some(Err(Error::Incomplete))) + } else { + Poll::Pending + } + } + } + } + } + + /// Reads content chunk of body part with unknown length. + /// + /// The `Content-Length` header for body part is not necessary. 
+ pub(crate) fn read_stream( + payload: &mut PayloadBuffer, + boundary: &str, + ) -> Poll>> { + let mut pos = 0; + + let len = payload.buf.len(); + + if len == 0 { + return if payload.eof { + Poll::Ready(Some(Err(Error::Incomplete))) + } else { + Poll::Pending + }; + } + + // check boundary + if len > 4 && payload.buf[0] == b'\r' { + let b_len = if payload.buf.starts_with(b"\r\n") && &payload.buf[2..4] == b"--" { + Some(4) + } else if &payload.buf[1..3] == b"--" { + Some(3) + } else { + None + }; + + if let Some(b_len) = b_len { + let b_size = boundary.len() + b_len; + if len < b_size { + return Poll::Pending; + } else if &payload.buf[b_len..b_size] == boundary.as_bytes() { + // found boundary + return Poll::Ready(None); + } + } + } + + loop { + return if let Some(idx) = memchr::memmem::find(&payload.buf[pos..], b"\r") { + let cur = pos + idx; + + // check if we have enough data for boundary detection + if cur + 4 > len { + if cur > 0 { + Poll::Ready(Some(Ok(payload.buf.split_to(cur).freeze()))) + } else { + Poll::Pending + } + } else { + // check boundary + if (&payload.buf[cur..cur + 2] == b"\r\n" + && &payload.buf[cur + 2..cur + 4] == b"--") + || (&payload.buf[cur..=cur] == b"\r" + && &payload.buf[cur + 1..cur + 3] == b"--") + { + if cur != 0 { + // return buffer + Poll::Ready(Some(Ok(payload.buf.split_to(cur).freeze()))) + } else { + pos = cur + 1; + continue; + } + } else { + // not boundary + pos = cur + 1; + continue; + } + } + } else { + Poll::Ready(Some(Ok(payload.buf.split().freeze()))) + }; + } + } + + pub(crate) fn poll(&mut self, safety: &Safety) -> Poll>> { + if self.payload.is_none() { + return Poll::Ready(None); + } + + let Some(mut payload) = self + .payload + .as_ref() + .expect("Field should not be polled after completion") + .get_mut(safety) + else { + return Poll::Pending; + }; + + if !self.eof { + let res = if let Some(ref mut len) = self.length { + Self::read_len(&mut payload, len) + } else { + Self::read_stream(&mut payload, &self.boundary) + }; + + match ready!(res) { + Some(Ok(bytes)) => return Poll::Ready(Some(Ok(bytes))), + Some(Err(err)) => return Poll::Ready(Some(Err(err))), + None => self.eof = true, + } + } + + let result = match payload.readline() { + Ok(None) => Poll::Pending, + Ok(Some(line)) => { + if line.as_ref() != b"\r\n" { + log::warn!("multipart field did not read all the data or it is malformed"); + } + Poll::Ready(None) + } + Err(err) => Poll::Ready(Some(Err(err))), + }; + + drop(payload); + + if let Poll::Ready(None) = result { + // drop payload buffer and make future un-poll-able + let _ = self.payload.take(); + } + + result + } +} + +#[cfg(test)] +mod tests { + use futures_util::{stream, StreamExt as _}; + + use super::*; + use crate::Multipart; + + // TODO: use test utility when multi-file support is introduced + fn create_double_request_with_header() -> (Bytes, HeaderMap) { + let bytes = Bytes::from( + "testasdadsad\r\n\ + --abbc761f78ff4d7cb7573b5a23f96ef0\r\n\ + Content-Disposition: form-data; name=\"file\"; filename=\"fn.txt\"\r\n\ + Content-Type: text/plain; charset=utf-8\r\n\ + \r\n\ + one+one+one\r\n\ + --abbc761f78ff4d7cb7573b5a23f96ef0\r\n\ + Content-Disposition: form-data; name=\"file\"; filename=\"fn.txt\"\r\n\ + Content-Type: text/plain; charset=utf-8\r\n\ + \r\n\ + two+two+two\r\n\ + --abbc761f78ff4d7cb7573b5a23f96ef0--\r\n", + ); + let mut headers = HeaderMap::new(); + headers.insert( + header::CONTENT_TYPE, + header::HeaderValue::from_static( + "multipart/mixed; boundary=\"abbc761f78ff4d7cb7573b5a23f96ef0\"", + ), + ); + 
(bytes, headers) + } + + #[actix_rt::test] + async fn bytes_unlimited() { + let (body, headers) = create_double_request_with_header(); + + let mut multipart = Multipart::new(&headers, stream::iter([Ok(body)])); + + let field = multipart + .next() + .await + .expect("multipart should have two fields") + .expect("multipart body should be well formatted") + .bytes(usize::MAX) + .await + .expect("field data should not be size limited") + .expect("reading field data should not error"); + assert_eq!(field, "one+one+one"); + + let field = multipart + .next() + .await + .expect("multipart should have two fields") + .expect("multipart body should be well formatted") + .bytes(usize::MAX) + .await + .expect("field data should not be size limited") + .expect("reading field data should not error"); + assert_eq!(field, "two+two+two"); + } + + #[actix_rt::test] + async fn bytes_limited() { + let (body, headers) = create_double_request_with_header(); + + let mut multipart = Multipart::new(&headers, stream::iter([Ok(body)])); + + multipart + .next() + .await + .expect("multipart should have two fields") + .expect("multipart body should be well formatted") + .bytes(8) // smaller than data size + .await + .expect_err("field data should be size limited"); + + // next field still readable + let field = multipart + .next() + .await + .expect("multipart should have two fields") + .expect("multipart body should be well formatted") + .bytes(usize::MAX) + .await + .expect("field data should not be size limited") + .expect("reading field data should not error"); + assert_eq!(field, "two+two+two"); + } +} diff --git a/actix-multipart/src/form/bytes.rs b/actix-multipart/src/form/bytes.rs index 3c5e2eb10..51b0cf7d9 100644 --- a/actix-multipart/src/form/bytes.rs +++ b/actix-multipart/src/form/bytes.rs @@ -1,7 +1,6 @@ //! Reads a field into memory. -use actix_web::HttpRequest; -use bytes::BytesMut; +use actix_web::{web::BytesMut, HttpRequest}; use futures_core::future::LocalBoxFuture; use futures_util::TryStreamExt as _; use mime::Mime; @@ -15,7 +14,7 @@ use crate::{ #[derive(Debug)] pub struct Bytes { /// The data. - pub data: bytes::Bytes, + pub data: actix_web::web::Bytes, /// The value of the `Content-Type` header. 
pub content_type: Option, @@ -41,8 +40,9 @@ impl<'t> FieldReader<'t> for Bytes { content_type: field.content_type().map(ToOwned::to_owned), file_name: field .content_disposition() + .expect("multipart form fields should have a content-disposition header") .get_filename() - .map(str::to_owned), + .map(ToOwned::to_owned), }) }) } diff --git a/actix-multipart/src/form/json.rs b/actix-multipart/src/form/json.rs index fb90a82b9..0118a8fba 100644 --- a/actix-multipart/src/form/json.rs +++ b/actix-multipart/src/form/json.rs @@ -32,7 +32,6 @@ where fn read_field(req: &'t HttpRequest, field: Field, limits: &'t mut Limits) -> Self::Future { Box::pin(async move { let config = JsonConfig::from_req(req); - let field_name = field.name().to_owned(); if config.validate_content_type { let valid = if let Some(mime) = field.content_type() { @@ -43,17 +42,19 @@ where if !valid { return Err(MultipartError::Field { - field_name, + name: field.form_field_name, source: config.map_error(req, JsonFieldError::ContentType), }); } } + let form_field_name = field.form_field_name.clone(); + let bytes = Bytes::read_field(req, field, limits).await?; Ok(Json(serde_json::from_slice(bytes.data.as_ref()).map_err( |err| MultipartError::Field { - field_name, + name: form_field_name, source: config.map_error(req, JsonFieldError::Deserialize(err)), }, )?)) @@ -131,14 +132,12 @@ impl Default for JsonConfig { #[cfg(test)] mod tests { - use std::{collections::HashMap, io::Cursor}; + use std::collections::HashMap; - use actix_multipart_rfc7578::client::multipart; - use actix_web::{http::StatusCode, web, App, HttpResponse, Responder}; + use actix_web::{http::StatusCode, web, web::Bytes, App, HttpResponse, Responder}; use crate::form::{ json::{Json, JsonConfig}, - tests::send_form, MultipartForm, }; @@ -155,6 +154,8 @@ mod tests { HttpResponse::Ok().finish() } + const TEST_JSON: &str = r#"{"key1": "value1", "key2": "value2"}"#; + #[actix_rt::test] async fn test_json_without_content_type() { let srv = actix_test::start(|| { @@ -163,10 +164,16 @@ mod tests { .app_data(JsonConfig::default().validate_content_type(false)) }); - let mut form = multipart::Form::default(); - form.add_text("json", "{\"key1\": \"value1\", \"key2\": \"value2\"}"); - let response = send_form(&srv, form, "/").await; - assert_eq!(response.status(), StatusCode::OK); + let (body, headers) = crate::test::create_form_data_payload_and_headers( + "json", + None, + None, + Bytes::from_static(TEST_JSON.as_bytes()), + ); + let mut req = srv.post("/"); + *req.headers_mut() = headers; + let res = req.send_body(body).await.unwrap(); + assert_eq!(res.status(), StatusCode::OK); } #[actix_rt::test] @@ -178,17 +185,27 @@ mod tests { }); // Deny because wrong content type - let bytes = Cursor::new("{\"key1\": \"value1\", \"key2\": \"value2\"}"); - let mut form = multipart::Form::default(); - form.add_reader_file_with_mime("json", bytes, "", mime::APPLICATION_OCTET_STREAM); - let response = send_form(&srv, form, "/").await; - assert_eq!(response.status(), StatusCode::BAD_REQUEST); + let (body, headers) = crate::test::create_form_data_payload_and_headers( + "json", + None, + Some(mime::APPLICATION_OCTET_STREAM), + Bytes::from_static(TEST_JSON.as_bytes()), + ); + let mut req = srv.post("/"); + *req.headers_mut() = headers; + let res = req.send_body(body).await.unwrap(); + assert_eq!(res.status(), StatusCode::BAD_REQUEST); // Allow because correct content type - let bytes = Cursor::new("{\"key1\": \"value1\", \"key2\": \"value2\"}"); - let mut form = multipart::Form::default(); - 
form.add_reader_file_with_mime("json", bytes, "", mime::APPLICATION_JSON); - let response = send_form(&srv, form, "/").await; - assert_eq!(response.status(), StatusCode::OK); + let (body, headers) = crate::test::create_form_data_payload_and_headers( + "json", + None, + Some(mime::APPLICATION_JSON), + Bytes::from_static(TEST_JSON.as_bytes()), + ); + let mut req = srv.post("/"); + *req.headers_mut() = headers; + let res = req.send_body(body).await.unwrap(); + assert_eq!(res.status(), StatusCode::OK); } } diff --git a/actix-multipart/src/form/mod.rs b/actix-multipart/src/form/mod.rs index 67adfd4b2..693a45e8e 100644 --- a/actix-multipart/src/form/mod.rs +++ b/actix-multipart/src/form/mod.rs @@ -1,4 +1,4 @@ -//! Process and extract typed data from a multipart stream. +//! Extract and process typed data from fields of a `multipart/form-data` request. use std::{ any::Any, @@ -33,6 +33,14 @@ pub trait FieldReader<'t>: Sized + Any { type Future: Future>; /// The form will call this function to handle the field. + /// + /// # Panics + /// + /// When reading the `field` payload using its `Stream` implementation, polling (manually or via + /// `next()`/`try_next()`) may panic after the payload is exhausted. If this is a problem for + /// your implementation of this method, you should [`fuse()`] the `Field` first. + /// + /// [`fuse()`]: futures_util::stream::StreamExt::fuse() fn read_field(req: &'t HttpRequest, field: Field, limits: &'t mut Limits) -> Self::Future; } @@ -72,13 +80,13 @@ where state: &'t mut State, duplicate_field: DuplicateField, ) -> Self::Future { - if state.contains_key(field.name()) { + if state.contains_key(&field.form_field_name) { match duplicate_field { DuplicateField::Ignore => return Box::pin(ready(Ok(()))), DuplicateField::Deny => { return Box::pin(ready(Err(MultipartError::DuplicateField( - field.name().to_owned(), + field.form_field_name, )))) } @@ -87,7 +95,7 @@ where } Box::pin(async move { - let field_name = field.name().to_owned(); + let field_name = field.form_field_name.clone(); let t = T::read_field(req, field, limits).await?; state.insert(field_name, Box::new(t)); Ok(()) @@ -115,10 +123,8 @@ where Box::pin(async move { // Note: Vec GroupReader always allows duplicates - let field_name = field.name().to_owned(); - let vec = state - .entry(field_name) + .entry(field.form_field_name.clone()) .or_insert_with(|| Box::>::default()) .downcast_mut::>() .unwrap(); @@ -151,13 +157,13 @@ where state: &'t mut State, duplicate_field: DuplicateField, ) -> Self::Future { - if state.contains_key(field.name()) { + if state.contains_key(&field.form_field_name) { match duplicate_field { DuplicateField::Ignore => return Box::pin(ready(Ok(()))), DuplicateField::Deny => { return Box::pin(ready(Err(MultipartError::DuplicateField( - field.name().to_owned(), + field.form_field_name, )))) } @@ -166,7 +172,7 @@ where } Box::pin(async move { - let field_name = field.name().to_owned(); + let field_name = field.form_field_name.clone(); let t = T::read_field(req, field, limits).await?; state.insert(field_name, Box::new(t)); Ok(()) @@ -273,6 +279,9 @@ impl Limits { /// [`MultipartCollect`] trait. You should use the [`macro@MultipartForm`] macro to derive this /// for your struct. /// +/// Note that this extractor rejects requests with any other Content-Type such as `multipart/mixed`, +/// `multipart/related`, or non-multipart media types. +/// /// Add a [`MultipartFormConfig`] to your app data to configure extraction. 
#[derive(Deref, DerefMut)] pub struct MultipartForm(pub T); @@ -286,14 +295,24 @@ impl MultipartForm { impl FromRequest for MultipartForm where - T: MultipartCollect, + T: MultipartCollect + 'static, { type Error = Error; type Future = LocalBoxFuture<'static, Result>; #[inline] fn from_request(req: &HttpRequest, payload: &mut dev::Payload) -> Self::Future { - let mut payload = Multipart::new(req.headers(), payload.take()); + let mut multipart = Multipart::from_req(req, payload); + + let content_type = match multipart.content_type_or_bail() { + Ok(content_type) => content_type, + Err(err) => return Box::pin(ready(Err(err.into()))), + }; + + if content_type.subtype() != mime::FORM_DATA { + // this extractor only supports multipart/form-data + return Box::pin(ready(Err(MultipartError::ContentTypeIncompatible.into()))); + }; let config = MultipartFormConfig::from_req(req); let mut limits = Limits::new(config.total_limit, config.memory_limit); @@ -305,21 +324,29 @@ where Box::pin( async move { let mut state = State::default(); - // We need to ensure field limits are shared for all instances of this field name + + // ensure limits are shared for all fields with this name let mut field_limits = HashMap::>::new(); - while let Some(field) = payload.try_next().await? { + while let Some(field) = multipart.try_next().await? { + debug_assert!( + !field.form_field_name.is_empty(), + "multipart form fields should have names", + ); + // Retrieve the limit for this field let entry = field_limits - .entry(field.name().to_owned()) - .or_insert_with(|| T::limit(field.name())); - limits.field_limit_remaining = entry.to_owned(); + .entry(field.form_field_name.clone()) + .or_insert_with(|| T::limit(&field.form_field_name)); + + limits.field_limit_remaining.clone_from(entry); T::handle_field(&req, field, &mut limits, &mut state).await?; // Update the stored limit *entry = limits.field_limit_remaining; } + let inner = T::from_state(state)?; Ok(MultipartForm(inner)) } @@ -395,11 +422,20 @@ mod tests { use actix_http::encoding::Decoder; use actix_multipart_rfc7578::client::multipart; use actix_test::TestServer; - use actix_web::{dev::Payload, http::StatusCode, web, App, HttpResponse, Responder}; + use actix_web::{ + dev::Payload, http::StatusCode, web, App, HttpRequest, HttpResponse, Resource, Responder, + }; use awc::{Client, ClientResponse}; + use futures_core::future::LocalBoxFuture; + use futures_util::TryStreamExt as _; use super::MultipartForm; - use crate::form::{bytes::Bytes, tempfile::TempFile, text::Text, MultipartFormConfig}; + use crate::{ + form::{ + bytes::Bytes, tempfile::TempFile, text::Text, FieldReader, Limits, MultipartFormConfig, + }, + Field, MultipartError, + }; pub async fn send_form( srv: &TestServer, @@ -733,4 +769,84 @@ mod tests { let response = send_form(&srv, form, "/").await; assert_eq!(response.status(), StatusCode::BAD_REQUEST); } + + #[actix_rt::test] + async fn non_multipart_form_data() { + #[derive(MultipartForm)] + struct TestNonMultipartFormData { + #[allow(unused)] + #[multipart(limit = "30B")] + foo: Text, + } + + async fn non_multipart_form_data_route( + _form: MultipartForm, + ) -> String { + unreachable!("request is sent with multipart/mixed"); + } + + let srv = actix_test::start(|| { + App::new().route("/", web::post().to(non_multipart_form_data_route)) + }); + + let mut form = multipart::Form::default(); + form.add_text("foo", "foo"); + + // mangle content-type, keeping the boundary + let ct = form.content_type().replacen("/form-data", "/mixed", 1); + + let res = 
Client::default() + .post(srv.url("/")) + .content_type(ct) + .send_body(multipart::Body::from(form)) + .await + .unwrap(); + + assert_eq!(res.status(), StatusCode::UNSUPPORTED_MEDIA_TYPE); + } + + #[should_panic(expected = "called `Result::unwrap()` on an `Err` value: Connect(Disconnected)")] + #[actix_web::test] + async fn field_try_next_panic() { + #[derive(Debug)] + struct NullSink; + + impl<'t> FieldReader<'t> for NullSink { + type Future = LocalBoxFuture<'t, Result>; + + fn read_field( + _: &'t HttpRequest, + mut field: Field, + _limits: &'t mut Limits, + ) -> Self::Future { + Box::pin(async move { + // exhaust field stream + while let Some(_chunk) = field.try_next().await? {} + + // poll again, crash + let _post = field.try_next().await; + + Ok(Self) + }) + } + } + + #[allow(dead_code)] + #[derive(MultipartForm)] + struct NullSinkForm { + foo: NullSink, + } + + async fn null_sink(_form: MultipartForm) -> impl Responder { + "unreachable" + } + + let srv = actix_test::start(|| App::new().service(Resource::new("/").post(null_sink))); + + let mut form = multipart::Form::default(); + form.add_text("foo", "data is not important to this test"); + + // panics with Err(Connect(Disconnected)) due to form NullSink panic + let _res = send_form(&srv, form, "/").await; + } } diff --git a/actix-multipart/src/form/tempfile.rs b/actix-multipart/src/form/tempfile.rs index 9371a026b..f329876f2 100644 --- a/actix-multipart/src/form/tempfile.rs +++ b/actix-multipart/src/form/tempfile.rs @@ -42,38 +42,36 @@ impl<'t> FieldReader<'t> for TempFile { fn read_field(req: &'t HttpRequest, mut field: Field, limits: &'t mut Limits) -> Self::Future { Box::pin(async move { let config = TempFileConfig::from_req(req); - let field_name = field.name().to_owned(); let mut size = 0; - let file = config - .create_tempfile() - .map_err(|err| config.map_error(req, &field_name, TempFileError::FileIo(err)))?; + let file = config.create_tempfile().map_err(|err| { + config.map_error(req, &field.form_field_name, TempFileError::FileIo(err)) + })?; - let mut file_async = - tokio::fs::File::from_std(file.reopen().map_err(|err| { - config.map_error(req, &field_name, TempFileError::FileIo(err)) - })?); + let mut file_async = tokio::fs::File::from_std(file.reopen().map_err(|err| { + config.map_error(req, &field.form_field_name, TempFileError::FileIo(err)) + })?); while let Some(chunk) = field.try_next().await? 
{ limits.try_consume_limits(chunk.len(), false)?; size += chunk.len(); file_async.write_all(chunk.as_ref()).await.map_err(|err| { - config.map_error(req, &field_name, TempFileError::FileIo(err)) + config.map_error(req, &field.form_field_name, TempFileError::FileIo(err)) })?; } - file_async - .flush() - .await - .map_err(|err| config.map_error(req, &field_name, TempFileError::FileIo(err)))?; + file_async.flush().await.map_err(|err| { + config.map_error(req, &field.form_field_name, TempFileError::FileIo(err)) + })?; Ok(TempFile { file, content_type: field.content_type().map(ToOwned::to_owned), file_name: field .content_disposition() + .expect("multipart form fields should have a content-disposition header") .get_filename() - .map(str::to_owned), + .map(ToOwned::to_owned), size, }) }) @@ -137,7 +135,7 @@ impl TempFileConfig { }; MultipartError::Field { - field_name: field_name.to_owned(), + name: field_name.to_owned(), source, } } diff --git a/actix-multipart/src/form/text.rs b/actix-multipart/src/form/text.rs index 83e211524..67a434ee6 100644 --- a/actix-multipart/src/form/text.rs +++ b/actix-multipart/src/form/text.rs @@ -36,7 +36,6 @@ where fn read_field(req: &'t HttpRequest, field: Field, limits: &'t mut Limits) -> Self::Future { Box::pin(async move { let config = TextConfig::from_req(req); - let field_name = field.name().to_owned(); if config.validate_content_type { let valid = if let Some(mime) = field.content_type() { @@ -49,22 +48,24 @@ where if !valid { return Err(MultipartError::Field { - field_name, + name: field.form_field_name, source: config.map_error(req, TextError::ContentType), }); } } + let form_field_name = field.form_field_name.clone(); + let bytes = Bytes::read_field(req, field, limits).await?; let text = str::from_utf8(&bytes.data).map_err(|err| MultipartError::Field { - field_name: field_name.clone(), + name: form_field_name.clone(), source: config.map_error(req, TextError::Utf8Error(err)), })?; Ok(Text(serde_plain::from_str(text).map_err(|err| { MultipartError::Field { - field_name, + name: form_field_name, source: config.map_error(req, TextError::Deserialize(err)), } })?)) diff --git a/actix-multipart/src/lib.rs b/actix-multipart/src/lib.rs index 495bae9c0..8eea35f2e 100644 --- a/actix-multipart/src/lib.rs +++ b/actix-multipart/src/lib.rs @@ -1,8 +1,61 @@ -//! Multipart form support for Actix Web. +//! Multipart request & form support for Actix Web. +//! +//! The [`Multipart`] extractor aims to support all kinds of `multipart/*` requests, including +//! `multipart/form-data`, `multipart/related` and `multipart/mixed`. This is a lower-level +//! extractor which supports reading [multipart fields](Field), in the order they are sent by the +//! client. +//! +//! Due to additional requirements for `multipart/form-data` requests, the higher level +//! [`MultipartForm`] extractor and derive macro only supports this media type. +//! +//! # Examples +//! +//! ```no_run +//! use actix_web::{post, App, HttpServer, Responder}; +//! +//! use actix_multipart::form::{json::Json as MpJson, tempfile::TempFile, MultipartForm}; +//! use serde::Deserialize; +//! +//! #[derive(Debug, Deserialize)] +//! struct Metadata { +//! name: String, +//! } +//! +//! #[derive(Debug, MultipartForm)] +//! struct UploadForm { +//! #[multipart(limit = "100MB")] +//! file: TempFile, +//! json: MpJson, +//! } +//! +//! #[post("/videos")] +//! pub async fn post_video(MultipartForm(form): MultipartForm) -> impl Responder { +//! format!( +//! "Uploaded file {}, with size: {}", +//! 
form.json.name, form.file.size +//! ) +//! } +//! +//! #[actix_web::main] +//! async fn main() -> std::io::Result<()> { +//! HttpServer::new(move || App::new().service(post_video)) +//! .bind(("127.0.0.1", 8080))? +//! .run() +//! .await +//! } +//! ``` +//! +//! cURL request: +//! +//! ```sh +//! curl -v --request POST \ +//! --url http://localhost:8080/videos \ +//! -F 'json={"name": "Cargo.lock"};type=application/json' \ +//! -F file=@./Cargo.lock +//! ``` +//! +//! [`MultipartForm`]: struct@form::MultipartForm -#![deny(rust_2018_idioms, nonstandard_style)] -#![warn(future_incompatible)] -#![allow(clippy::borrow_interior_mutable_const)] #![doc(html_logo_url = "https://actix.rs/img/logo.png")] #![doc(html_favicon_url = "https://actix.rs/favicon.ico")] #![cfg_attr(docsrs, feature(doc_auto_cfg))] @@ -13,11 +66,15 @@ extern crate self as actix_multipart; mod error; mod extractor; -mod server; - +pub(crate) mod field; pub mod form; +mod multipart; +pub(crate) mod payload; +pub(crate) mod safety; +pub mod test; pub use self::{ - error::MultipartError, - server::{Field, Multipart}, + error::Error as MultipartError, + field::{Field, LimitExceeded}, + multipart::Multipart, }; diff --git a/actix-multipart/src/multipart.rs b/actix-multipart/src/multipart.rs new file mode 100644 index 000000000..e38fbde9e --- /dev/null +++ b/actix-multipart/src/multipart.rs @@ -0,0 +1,883 @@ +//! Multipart response payload support. + +use std::{ + cell::RefCell, + pin::Pin, + rc::Rc, + task::{Context, Poll}, +}; + +use actix_web::{ + dev, + error::{ParseError, PayloadError}, + http::header::{self, ContentDisposition, HeaderMap, HeaderName, HeaderValue}, + web::Bytes, + HttpRequest, +}; +use futures_core::stream::Stream; +use mime::Mime; + +use crate::{ + error::Error, + field::InnerField, + payload::{PayloadBuffer, PayloadRef}, + safety::Safety, + Field, +}; + +const MAX_HEADERS: usize = 32; + +/// The server-side implementation of `multipart/form-data` requests. +/// +/// This will parse the incoming stream into `MultipartItem` instances via its `Stream` +/// implementation. `MultipartItem::Field` contains multipart field. `MultipartItem::Multipart` is +/// used for nested multipart streams. +pub struct Multipart { + flow: Flow, + safety: Safety, +} + +enum Flow { + InFlight(Inner), + + /// Error container is Some until an error is returned out of the flow. + Error(Option), +} + +impl Multipart { + /// Creates multipart instance from parts. + pub fn new(headers: &HeaderMap, stream: S) -> Self + where + S: Stream> + 'static, + { + match Self::find_ct_and_boundary(headers) { + Ok((ct, boundary)) => Self::from_ct_and_boundary(ct, boundary, stream), + Err(err) => Self::from_error(err), + } + } + + /// Creates multipart instance from parts. + pub(crate) fn from_req(req: &HttpRequest, payload: &mut dev::Payload) -> Self { + match Self::find_ct_and_boundary(req.headers()) { + Ok((ct, boundary)) => Self::from_ct_and_boundary(ct, boundary, payload.take()), + Err(err) => Self::from_error(err), + } + } + + /// Extract Content-Type and boundary info from headers. + pub(crate) fn find_ct_and_boundary(headers: &HeaderMap) -> Result<(Mime, String), Error> { + let content_type = headers + .get(&header::CONTENT_TYPE) + .ok_or(Error::ContentTypeMissing)? 
+ .to_str() + .ok() + .and_then(|content_type| content_type.parse::().ok()) + .ok_or(Error::ContentTypeParse)?; + + if content_type.type_() != mime::MULTIPART { + return Err(Error::ContentTypeIncompatible); + } + + let boundary = content_type + .get_param(mime::BOUNDARY) + .ok_or(Error::BoundaryMissing)? + .as_str() + .to_owned(); + + Ok((content_type, boundary)) + } + + /// Constructs a new multipart reader from given Content-Type, boundary, and stream. + pub(crate) fn from_ct_and_boundary(ct: Mime, boundary: String, stream: S) -> Multipart + where + S: Stream> + 'static, + { + Multipart { + safety: Safety::new(), + flow: Flow::InFlight(Inner { + payload: PayloadRef::new(PayloadBuffer::new(stream)), + content_type: ct, + boundary, + state: State::FirstBoundary, + item: Item::None, + }), + } + } + + /// Constructs a new multipart reader from given `MultipartError`. + pub(crate) fn from_error(err: Error) -> Multipart { + Multipart { + flow: Flow::Error(Some(err)), + safety: Safety::new(), + } + } + + /// Return requests parsed Content-Type or raise the stored error. + pub(crate) fn content_type_or_bail(&mut self) -> Result { + match self.flow { + Flow::InFlight(ref inner) => Ok(inner.content_type.clone()), + Flow::Error(ref mut err) => Err(err + .take() + .expect("error should not be taken after it was returned")), + } + } +} + +impl Stream for Multipart { + type Item = Result; + + fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { + let this = self.get_mut(); + + match this.flow { + Flow::InFlight(ref mut inner) => { + if let Some(mut buffer) = inner.payload.get_mut(&this.safety) { + // check safety and poll read payload to buffer. + buffer.poll_stream(cx)?; + } else if !this.safety.is_clean() { + // safety violation + return Poll::Ready(Some(Err(Error::NotConsumed))); + } else { + return Poll::Pending; + } + + inner.poll(&this.safety, cx) + } + + Flow::Error(ref mut err) => Poll::Ready(Some(Err(err + .take() + .expect("Multipart polled after finish")))), + } + } +} + +#[derive(PartialEq, Debug)] +enum State { + /// Skip data until first boundary. + FirstBoundary, + + /// Reading boundary. + Boundary, + + /// Reading Headers. + Headers, + + /// Stream EOF. + Eof, +} + +enum Item { + None, + Field(Rc>), +} + +struct Inner { + /// Request's payload stream & buffer. + payload: PayloadRef, + + /// Request's Content-Type. + /// + /// Guaranteed to have "multipart" top-level media type, i.e., `multipart/*`. + content_type: Mime, + + /// Field boundary. + boundary: String, + + state: State, + item: Item, +} + +impl Inner { + fn read_field_headers(payload: &mut PayloadBuffer) -> Result, Error> { + match payload.read_until(b"\r\n\r\n")? { + None => { + if payload.eof { + Err(Error::Incomplete) + } else { + Ok(None) + } + } + + Some(bytes) => { + let mut hdrs = [httparse::EMPTY_HEADER; MAX_HEADERS]; + + match httparse::parse_headers(&bytes, &mut hdrs).map_err(ParseError::from)? { + httparse::Status::Complete((_, hdrs)) => { + // convert headers + let mut headers = HeaderMap::with_capacity(hdrs.len()); + + for h in hdrs { + let name = + HeaderName::try_from(h.name).map_err(|_| ParseError::Header)?; + let value = + HeaderValue::try_from(h.value).map_err(|_| ParseError::Header)?; + headers.append(name, value); + } + + Ok(Some(headers)) + } + + httparse::Status::Partial => Err(ParseError::Header.into()), + } + } + } + } + + /// Reads a field boundary from the payload buffer (and discards it). + /// + /// Reads "in-between" and "final" boundaries. E.g. 
for boundary = "foo": + /// + /// ```plain + /// --foo <-- in-between fields + /// --foo-- <-- end of request body, should be followed by EOF + /// ``` + /// + /// Returns: + /// + /// - `Ok(Some(true))` - final field boundary read (EOF) + /// - `Ok(Some(false))` - field boundary read + /// - `Ok(None)` - boundary not found, more data needs reading + /// - `Err(BoundaryMissing)` - multipart boundary is missing + fn read_boundary(payload: &mut PayloadBuffer, boundary: &str) -> Result, Error> { + // TODO: need to read epilogue + let chunk = match payload.readline_or_eof()? { + // TODO: this might be okay as a let Some() else return Ok(None) + None => return Ok(payload.eof.then_some(true)), + Some(chunk) => chunk, + }; + + const BOUNDARY_MARKER: &[u8] = b"--"; + const LINE_BREAK: &[u8] = b"\r\n"; + + let boundary_len = boundary.len(); + + if chunk.len() < boundary_len + 2 + 2 + || !chunk.starts_with(BOUNDARY_MARKER) + || &chunk[2..boundary_len + 2] != boundary.as_bytes() + { + return Err(Error::BoundaryMissing); + } + + // chunk facts: + // - long enough to contain boundary + 2 markers or 1 marker and line-break + // - starts with boundary marker + // - chunk contains correct boundary + + if &chunk[boundary_len + 2..] == LINE_BREAK { + // boundary is followed by line-break, indicating more fields to come + return Ok(Some(false)); + } + + // boundary is followed by marker + if &chunk[boundary_len + 2..boundary_len + 4] == BOUNDARY_MARKER + && ( + // chunk is exactly boundary len + 2 markers + chunk.len() == boundary_len + 2 + 2 + // final boundary is allowed to end with a line-break + || &chunk[boundary_len + 4..] == LINE_BREAK + ) + { + return Ok(Some(true)); + } + + Err(Error::BoundaryMissing) + } + + fn skip_until_boundary( + payload: &mut PayloadBuffer, + boundary: &str, + ) -> Result, Error> { + let mut eof = false; + + loop { + match payload.readline()? { + Some(chunk) => { + if chunk.is_empty() { + return Err(Error::BoundaryMissing); + } + if chunk.len() < boundary.len() { + continue; + } + if &chunk[..2] == b"--" && &chunk[2..chunk.len() - 2] == boundary.as_bytes() { + break; + } else { + if chunk.len() < boundary.len() + 2 { + continue; + } + let b: &[u8] = boundary.as_ref(); + if &chunk[..boundary.len()] == b + && &chunk[boundary.len()..boundary.len() + 2] == b"--" + { + eof = true; + break; + } + } + } + None => { + return if payload.eof { + Err(Error::Incomplete) + } else { + Ok(None) + }; + } + } + } + Ok(Some(eof)) + } + + fn poll(&mut self, safety: &Safety, cx: &Context<'_>) -> Poll>> { + if self.state == State::Eof { + Poll::Ready(None) + } else { + // release field + loop { + // Nested multipart streams of fields has to be consumed + // before switching to next + if safety.current() { + let stop = match self.item { + Item::Field(ref mut field) => match field.borrow_mut().poll(safety) { + Poll::Pending => return Poll::Pending, + Poll::Ready(Some(Ok(_))) => continue, + Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err))), + Poll::Ready(None) => true, + }, + Item::None => false, + }; + if stop { + self.item = Item::None; + } + if let Item::None = self.item { + break; + } + } + } + + let field_headers = if let Some(mut payload) = self.payload.get_mut(safety) { + match self.state { + // read until first boundary + State::FirstBoundary => { + match Inner::skip_until_boundary(&mut payload, &self.boundary)? 
{ + None => return Poll::Pending, + Some(eof) => { + if eof { + self.state = State::Eof; + return Poll::Ready(None); + } else { + self.state = State::Headers; + } + } + } + } + + // read boundary + State::Boundary => match Inner::read_boundary(&mut payload, &self.boundary)? { + None => return Poll::Pending, + Some(eof) => { + if eof { + self.state = State::Eof; + return Poll::Ready(None); + } else { + self.state = State::Headers; + } + } + }, + + _ => {} + } + + // read field headers for next field + if self.state == State::Headers { + if let Some(headers) = Inner::read_field_headers(&mut payload)? { + self.state = State::Boundary; + headers + } else { + return Poll::Pending; + } + } else { + unreachable!() + } + } else { + log::debug!("NotReady: field is in flight"); + return Poll::Pending; + }; + + let field_content_disposition = field_headers + .get(&header::CONTENT_DISPOSITION) + .and_then(|cd| ContentDisposition::from_raw(cd).ok()) + .filter(|content_disposition| { + matches!( + content_disposition.disposition, + header::DispositionType::FormData, + ) + }); + + let form_field_name = if self.content_type.subtype() == mime::FORM_DATA { + // According to RFC 7578 §4.2, which relates to "multipart/form-data" requests + // specifically, fields must have a Content-Disposition header, its disposition + // type must be set as "form-data", and it must have a name parameter. + + let Some(cd) = &field_content_disposition else { + return Poll::Ready(Some(Err(Error::ContentDispositionMissing))); + }; + + let Some(field_name) = cd.get_name() else { + return Poll::Ready(Some(Err(Error::ContentDispositionNameMissing))); + }; + + Some(field_name.to_owned()) + } else { + None + }; + + // TODO: check out other multipart/* RFCs for specific requirements + + let field_content_type: Option = field_headers + .get(&header::CONTENT_TYPE) + .and_then(|ct| ct.to_str().ok()) + .and_then(|ct| ct.parse().ok()); + + self.state = State::Boundary; + + // nested multipart stream is not supported + if let Some(mime) = &field_content_type { + if mime.type_() == mime::MULTIPART { + return Poll::Ready(Some(Err(Error::Nested))); + } + } + + let field_inner = + InnerField::new_in_rc(self.payload.clone(), self.boundary.clone(), &field_headers)?; + + self.item = Item::Field(Rc::clone(&field_inner)); + + Poll::Ready(Some(Ok(Field::new( + field_content_type, + field_content_disposition, + form_field_name, + field_headers, + safety.clone(cx), + field_inner, + )))) + } + } +} + +impl Drop for Inner { + fn drop(&mut self) { + // InnerMultipartItem::Field has to be dropped first because of Safety. 
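// A hedged usage sketch (not part of this diff) of the lower-level `Multipart` stream whose
// polling logic is implemented above: fields are yielded in request order, and for
// `multipart/form-data` bodies each field carries a `form-data` Content-Disposition with a
// name. The `raw_upload` handler is hypothetical.
use actix_multipart::Multipart;
use actix_web::HttpResponse;
use futures_util::TryStreamExt as _;

async fn raw_upload(mut payload: Multipart) -> Result<HttpResponse, actix_web::Error> {
    while let Some(mut field) = payload.try_next().await? {
        // Content-Disposition (and therefore the name) is optional for non-form-data bodies.
        let name = field
            .content_disposition()
            .and_then(|cd| cd.get_name())
            .unwrap_or("<unnamed>")
            .to_owned();

        // drain the field's body chunks
        let mut size = 0;
        while let Some(chunk) = field.try_next().await? {
            size += chunk.len();
        }

        println!("field {name}: {size} bytes");
    }

    Ok(HttpResponse::Ok().finish())
}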
+ self.item = Item::None; + } +} + +#[cfg(test)] +mod tests { + use std::time::Duration; + + use actix_http::h1; + use actix_web::{ + http::header::{DispositionParam, DispositionType}, + rt, + test::TestRequest, + web::{BufMut as _, BytesMut}, + FromRequest, + }; + use assert_matches::assert_matches; + use futures_test::stream::StreamTestExt as _; + use futures_util::{stream, StreamExt as _}; + use tokio::sync::mpsc; + use tokio_stream::wrappers::UnboundedReceiverStream; + + use super::*; + + const BOUNDARY: &str = "abbc761f78ff4d7cb7573b5a23f96ef0"; + + #[actix_rt::test] + async fn test_boundary() { + let headers = HeaderMap::new(); + match Multipart::find_ct_and_boundary(&headers) { + Err(Error::ContentTypeMissing) => {} + _ => unreachable!("should not happen"), + } + + let mut headers = HeaderMap::new(); + headers.insert( + header::CONTENT_TYPE, + header::HeaderValue::from_static("test"), + ); + + match Multipart::find_ct_and_boundary(&headers) { + Err(Error::ContentTypeParse) => {} + _ => unreachable!("should not happen"), + } + + let mut headers = HeaderMap::new(); + headers.insert( + header::CONTENT_TYPE, + header::HeaderValue::from_static("multipart/mixed"), + ); + match Multipart::find_ct_and_boundary(&headers) { + Err(Error::BoundaryMissing) => {} + _ => unreachable!("should not happen"), + } + + let mut headers = HeaderMap::new(); + headers.insert( + header::CONTENT_TYPE, + header::HeaderValue::from_static( + "multipart/mixed; boundary=\"5c02368e880e436dab70ed54e1c58209\"", + ), + ); + + assert_eq!( + Multipart::find_ct_and_boundary(&headers).unwrap().1, + "5c02368e880e436dab70ed54e1c58209", + ); + } + + fn create_stream() -> ( + mpsc::UnboundedSender>, + impl Stream>, + ) { + let (tx, rx) = mpsc::unbounded_channel(); + + ( + tx, + UnboundedReceiverStream::new(rx).map(|res| res.map_err(|_| panic!())), + ) + } + + fn create_simple_request_with_header() -> (Bytes, HeaderMap) { + let (body, headers) = crate::test::create_form_data_payload_and_headers_with_boundary( + BOUNDARY, + "file", + Some("fn.txt".to_owned()), + Some(mime::TEXT_PLAIN_UTF_8), + Bytes::from_static(b"data"), + ); + + let mut buf = BytesMut::with_capacity(body.len() + 14); + + // add junk before form to test pre-boundary data rejection + buf.put("testasdadsad\r\n".as_bytes()); + + buf.put(body); + + (buf.freeze(), headers) + } + + // TODO: use test utility when multi-file support is introduced + fn create_double_request_with_header() -> (Bytes, HeaderMap) { + let bytes = Bytes::from( + "testasdadsad\r\n\ + --abbc761f78ff4d7cb7573b5a23f96ef0\r\n\ + Content-Disposition: form-data; name=\"file\"; filename=\"fn.txt\"\r\n\ + Content-Type: text/plain; charset=utf-8\r\nContent-Length: 4\r\n\r\n\ + test\r\n\ + --abbc761f78ff4d7cb7573b5a23f96ef0\r\n\ + Content-Disposition: form-data; name=\"file\"; filename=\"fn.txt\"\r\n\ + Content-Type: text/plain; charset=utf-8\r\nContent-Length: 4\r\n\r\n\ + data\r\n\ + --abbc761f78ff4d7cb7573b5a23f96ef0--\r\n", + ); + let mut headers = HeaderMap::new(); + headers.insert( + header::CONTENT_TYPE, + header::HeaderValue::from_static( + "multipart/mixed; boundary=\"abbc761f78ff4d7cb7573b5a23f96ef0\"", + ), + ); + (bytes, headers) + } + + #[actix_rt::test] + async fn test_multipart_no_end_crlf() { + let (sender, payload) = create_stream(); + let (mut bytes, headers) = create_double_request_with_header(); + let bytes_stripped = bytes.split_to(bytes.len()); // strip crlf + + sender.send(Ok(bytes_stripped)).unwrap(); + drop(sender); // eof + + let mut multipart = Multipart::new(&headers, 
payload); + + match multipart.next().await.unwrap() { + Ok(_) => {} + _ => unreachable!(), + } + + match multipart.next().await.unwrap() { + Ok(_) => {} + _ => unreachable!(), + } + + match multipart.next().await { + None => {} + _ => unreachable!(), + } + } + + #[actix_rt::test] + async fn test_multipart() { + let (sender, payload) = create_stream(); + let (bytes, headers) = create_double_request_with_header(); + + sender.send(Ok(bytes)).unwrap(); + + let mut multipart = Multipart::new(&headers, payload); + match multipart.next().await { + Some(Ok(mut field)) => { + let cd = field.content_disposition().unwrap(); + assert_eq!(cd.disposition, DispositionType::FormData); + assert_eq!(cd.parameters[0], DispositionParam::Name("file".into())); + + assert_eq!(field.content_type().unwrap().type_(), mime::TEXT); + assert_eq!(field.content_type().unwrap().subtype(), mime::PLAIN); + + match field.next().await.unwrap() { + Ok(chunk) => assert_eq!(chunk, "test"), + _ => unreachable!(), + } + match field.next().await { + None => {} + _ => unreachable!(), + } + } + _ => unreachable!(), + } + + match multipart.next().await.unwrap() { + Ok(mut field) => { + assert_eq!(field.content_type().unwrap().type_(), mime::TEXT); + assert_eq!(field.content_type().unwrap().subtype(), mime::PLAIN); + + match field.next().await { + Some(Ok(chunk)) => assert_eq!(chunk, "data"), + _ => unreachable!(), + } + match field.next().await { + None => {} + _ => unreachable!(), + } + } + _ => unreachable!(), + } + + match multipart.next().await { + None => {} + _ => unreachable!(), + } + } + + // Loops, collecting all bytes until end-of-field + async fn get_whole_field(field: &mut Field) -> BytesMut { + let mut b = BytesMut::new(); + loop { + match field.next().await { + Some(Ok(chunk)) => b.extend_from_slice(&chunk), + None => return b, + _ => unreachable!(), + } + } + } + + #[actix_rt::test] + async fn test_stream() { + let (bytes, headers) = create_double_request_with_header(); + let payload = stream::iter(bytes) + .map(|byte| Ok(Bytes::copy_from_slice(&[byte]))) + .interleave_pending(); + + let mut multipart = Multipart::new(&headers, payload); + match multipart.next().await.unwrap() { + Ok(mut field) => { + let cd = field.content_disposition().unwrap(); + assert_eq!(cd.disposition, DispositionType::FormData); + assert_eq!(cd.parameters[0], DispositionParam::Name("file".into())); + + assert_eq!(field.content_type().unwrap().type_(), mime::TEXT); + assert_eq!(field.content_type().unwrap().subtype(), mime::PLAIN); + + assert_eq!(get_whole_field(&mut field).await, "test"); + } + _ => unreachable!(), + } + + match multipart.next().await { + Some(Ok(mut field)) => { + assert_eq!(field.content_type().unwrap().type_(), mime::TEXT); + assert_eq!(field.content_type().unwrap().subtype(), mime::PLAIN); + + assert_eq!(get_whole_field(&mut field).await, "data"); + } + _ => unreachable!(), + } + + match multipart.next().await { + None => {} + _ => unreachable!(), + } + } + + #[actix_rt::test] + async fn test_multipart_from_error() { + let err = Error::ContentTypeMissing; + let mut multipart = Multipart::from_error(err); + assert!(multipart.next().await.unwrap().is_err()) + } + + #[actix_rt::test] + async fn test_multipart_from_boundary() { + let (_, payload) = create_stream(); + let (_, headers) = create_simple_request_with_header(); + let (ct, boundary) = Multipart::find_ct_and_boundary(&headers).unwrap(); + let _ = Multipart::from_ct_and_boundary(ct, boundary, payload); + } + + #[actix_rt::test] + async fn 
test_multipart_payload_consumption() { + // with sample payload and HttpRequest with no headers + let (_, inner_payload) = h1::Payload::create(false); + let mut payload = actix_web::dev::Payload::from(inner_payload); + let req = TestRequest::default().to_http_request(); + + // multipart should generate an error + let mut mp = Multipart::from_request(&req, &mut payload).await.unwrap(); + assert!(mp.next().await.unwrap().is_err()); + + // and should not consume the payload + match payload { + actix_web::dev::Payload::H1 { .. } => {} //expected + _ => unreachable!(), + } + } + + #[actix_rt::test] + async fn no_content_disposition_form_data() { + let bytes = Bytes::from( + "testasdadsad\r\n\ + --abbc761f78ff4d7cb7573b5a23f96ef0\r\n\ + Content-Type: text/plain; charset=utf-8\r\n\ + Content-Length: 4\r\n\ + \r\n\ + test\r\n\ + --abbc761f78ff4d7cb7573b5a23f96ef0\r\n", + ); + let mut headers = HeaderMap::new(); + headers.insert( + header::CONTENT_TYPE, + header::HeaderValue::from_static( + "multipart/form-data; boundary=\"abbc761f78ff4d7cb7573b5a23f96ef0\"", + ), + ); + let payload = stream::iter(bytes) + .map(|byte| Ok(Bytes::copy_from_slice(&[byte]))) + .interleave_pending(); + + let mut multipart = Multipart::new(&headers, payload); + let res = multipart.next().await.unwrap(); + assert_matches!( + res.expect_err( + "according to RFC 7578, form-data fields require a content-disposition header" + ), + Error::ContentDispositionMissing + ); + } + + #[actix_rt::test] + async fn no_content_disposition_non_form_data() { + let bytes = Bytes::from( + "testasdadsad\r\n\ + --abbc761f78ff4d7cb7573b5a23f96ef0\r\n\ + Content-Type: text/plain; charset=utf-8\r\n\ + Content-Length: 4\r\n\ + \r\n\ + test\r\n\ + --abbc761f78ff4d7cb7573b5a23f96ef0\r\n", + ); + let mut headers = HeaderMap::new(); + headers.insert( + header::CONTENT_TYPE, + header::HeaderValue::from_static( + "multipart/mixed; boundary=\"abbc761f78ff4d7cb7573b5a23f96ef0\"", + ), + ); + let payload = stream::iter(bytes) + .map(|byte| Ok(Bytes::copy_from_slice(&[byte]))) + .interleave_pending(); + + let mut multipart = Multipart::new(&headers, payload); + let res = multipart.next().await.unwrap(); + res.unwrap(); + } + + #[actix_rt::test] + async fn no_name_in_form_data_content_disposition() { + let bytes = Bytes::from( + "testasdadsad\r\n\ + --abbc761f78ff4d7cb7573b5a23f96ef0\r\n\ + Content-Disposition: form-data; filename=\"fn.txt\"\r\n\ + Content-Type: text/plain; charset=utf-8\r\n\ + Content-Length: 4\r\n\ + \r\n\ + test\r\n\ + --abbc761f78ff4d7cb7573b5a23f96ef0\r\n", + ); + let mut headers = HeaderMap::new(); + headers.insert( + header::CONTENT_TYPE, + header::HeaderValue::from_static( + "multipart/form-data; boundary=\"abbc761f78ff4d7cb7573b5a23f96ef0\"", + ), + ); + let payload = stream::iter(bytes) + .map(|byte| Ok(Bytes::copy_from_slice(&[byte]))) + .interleave_pending(); + + let mut multipart = Multipart::new(&headers, payload); + let res = multipart.next().await.unwrap(); + assert_matches!( + res.expect_err("according to RFC 7578, form-data fields require a name attribute"), + Error::ContentDispositionNameMissing + ); + } + + #[actix_rt::test] + async fn test_drop_multipart_dont_hang() { + let (sender, payload) = create_stream(); + let (bytes, headers) = create_simple_request_with_header(); + sender.send(Ok(bytes)).unwrap(); + drop(sender); // eof + + let mut multipart = Multipart::new(&headers, payload); + let mut field = multipart.next().await.unwrap().unwrap(); + + drop(multipart); + + // should fail immediately + match 
field.next().await { + Some(Err(Error::NotConsumed)) => {} + _ => panic!(), + }; + } + + #[actix_rt::test] + async fn test_drop_field_awaken_multipart() { + let (sender, payload) = create_stream(); + let (bytes, headers) = create_double_request_with_header(); + sender.send(Ok(bytes)).unwrap(); + drop(sender); // eof + + let mut multipart = Multipart::new(&headers, payload); + let mut field = multipart.next().await.unwrap().unwrap(); + + let task = rt::spawn(async move { + rt::time::sleep(Duration::from_millis(500)).await; + assert_eq!(field.next().await.unwrap().unwrap(), "test"); + drop(field); + }); + + // dropping field should awaken current task + let _ = multipart.next().await.unwrap().unwrap(); + task.await.unwrap(); + } +} diff --git a/actix-multipart/src/payload.rs b/actix-multipart/src/payload.rs new file mode 100644 index 000000000..858634bc0 --- /dev/null +++ b/actix-multipart/src/payload.rs @@ -0,0 +1,255 @@ +use std::{ + cell::{RefCell, RefMut}, + cmp, mem, + pin::Pin, + rc::Rc, + task::{Context, Poll}, +}; + +use actix_web::{ + error::PayloadError, + web::{Bytes, BytesMut}, +}; +use futures_core::stream::{LocalBoxStream, Stream}; + +use crate::{error::Error, safety::Safety}; + +pub(crate) struct PayloadRef { + payload: Rc>, +} + +impl PayloadRef { + pub(crate) fn new(payload: PayloadBuffer) -> PayloadRef { + PayloadRef { + payload: Rc::new(RefCell::new(payload)), + } + } + + pub(crate) fn get_mut(&self, safety: &Safety) -> Option> { + if safety.current() { + Some(self.payload.borrow_mut()) + } else { + None + } + } +} + +impl Clone for PayloadRef { + fn clone(&self) -> PayloadRef { + PayloadRef { + payload: Rc::clone(&self.payload), + } + } +} + +/// Payload buffer. +pub(crate) struct PayloadBuffer { + pub(crate) stream: LocalBoxStream<'static, Result>, + pub(crate) buf: BytesMut, + /// EOF flag. If true, no more payload reads will be attempted. + pub(crate) eof: bool, +} + +impl PayloadBuffer { + /// Constructs new payload buffer. + pub(crate) fn new(stream: S) -> Self + where + S: Stream> + 'static, + { + PayloadBuffer { + stream: Box::pin(stream), + buf: BytesMut::with_capacity(1_024), // pre-allocate 1KiB + eof: false, + } + } + + pub(crate) fn poll_stream(&mut self, cx: &mut Context<'_>) -> Result<(), PayloadError> { + loop { + match Pin::new(&mut self.stream).poll_next(cx) { + Poll::Ready(Some(Ok(data))) => { + self.buf.extend_from_slice(&data); + // try to read more data + continue; + } + Poll::Ready(Some(Err(err))) => return Err(err), + Poll::Ready(None) => { + self.eof = true; + return Ok(()); + } + Poll::Pending => return Ok(()), + } + } + } + + /// Reads exact number of bytes. + #[cfg(test)] + pub(crate) fn read_exact(&mut self, size: usize) -> Option { + if size <= self.buf.len() { + Some(self.buf.split_to(size).freeze()) + } else { + None + } + } + + pub(crate) fn read_max(&mut self, size: u64) -> Result, Error> { + if !self.buf.is_empty() { + let size = cmp::min(self.buf.len() as u64, size) as usize; + Ok(Some(self.buf.split_to(size).freeze())) + } else if self.eof { + Err(Error::Incomplete) + } else { + Ok(None) + } + } + + /// Reads until specified ending. 
+ /// + /// Returns: + /// + /// - `Ok(Some(chunk))` - `needle` is found, with chunk ending after needle + /// - `Err(Incomplete)` - `needle` is not found and we're at EOF + /// - `Ok(None)` - `needle` is not found otherwise + pub(crate) fn read_until(&mut self, needle: &[u8]) -> Result, Error> { + match memchr::memmem::find(&self.buf, needle) { + // buffer exhausted and EOF without finding needle + None if self.eof => Err(Error::Incomplete), + + // needle not yet found + None => Ok(None), + + // needle found, split chunk out of buf + Some(idx) => Ok(Some(self.buf.split_to(idx + needle.len()).freeze())), + } + } + + /// Reads bytes until new line delimiter (`\n`, `0x0A`). + /// + /// Returns: + /// + /// - `Ok(Some(chunk))` - `needle` is found, with chunk ending after needle + /// - `Err(Incomplete)` - `needle` is not found and we're at EOF + /// - `Ok(None)` - `needle` is not found otherwise + #[inline] + pub(crate) fn readline(&mut self) -> Result, Error> { + self.read_until(b"\n") + } + + /// Reads bytes until new line delimiter or until EOF. + #[inline] + pub(crate) fn readline_or_eof(&mut self) -> Result, Error> { + match self.readline() { + Err(Error::Incomplete) if self.eof => Ok(Some(self.buf.split().freeze())), + line => line, + } + } + + /// Puts unprocessed data back to the buffer. + pub(crate) fn unprocessed(&mut self, data: Bytes) { + // TODO: use BytesMut::from when it's released, see https://github.com/tokio-rs/bytes/pull/710 + let buf = BytesMut::from(&data[..]); + let buf = mem::replace(&mut self.buf, buf); + self.buf.extend_from_slice(&buf); + } +} + +#[cfg(test)] +mod tests { + use actix_http::h1; + use futures_util::future::lazy; + + use super::*; + + #[actix_rt::test] + async fn basic() { + let (_, payload) = h1::Payload::create(false); + let mut payload = PayloadBuffer::new(payload); + + assert_eq!(payload.buf.len(), 0); + lazy(|cx| payload.poll_stream(cx)).await.unwrap(); + assert_eq!(None, payload.read_max(1).unwrap()); + } + + #[actix_rt::test] + async fn eof() { + let (mut sender, payload) = h1::Payload::create(false); + let mut payload = PayloadBuffer::new(payload); + + assert_eq!(None, payload.read_max(4).unwrap()); + sender.feed_data(Bytes::from("data")); + sender.feed_eof(); + lazy(|cx| payload.poll_stream(cx)).await.unwrap(); + + assert_eq!(Some(Bytes::from("data")), payload.read_max(4).unwrap()); + assert_eq!(payload.buf.len(), 0); + assert!(payload.read_max(1).is_err()); + assert!(payload.eof); + } + + #[actix_rt::test] + async fn err() { + let (mut sender, payload) = h1::Payload::create(false); + let mut payload = PayloadBuffer::new(payload); + assert_eq!(None, payload.read_max(1).unwrap()); + sender.set_error(PayloadError::Incomplete(None)); + lazy(|cx| payload.poll_stream(cx)).await.err().unwrap(); + } + + #[actix_rt::test] + async fn read_max() { + let (mut sender, payload) = h1::Payload::create(false); + let mut payload = PayloadBuffer::new(payload); + + sender.feed_data(Bytes::from("line1")); + sender.feed_data(Bytes::from("line2")); + lazy(|cx| payload.poll_stream(cx)).await.unwrap(); + assert_eq!(payload.buf.len(), 10); + + assert_eq!(Some(Bytes::from("line1")), payload.read_max(5).unwrap()); + assert_eq!(payload.buf.len(), 5); + + assert_eq!(Some(Bytes::from("line2")), payload.read_max(5).unwrap()); + assert_eq!(payload.buf.len(), 0); + } + + #[actix_rt::test] + async fn read_exactly() { + let (mut sender, payload) = h1::Payload::create(false); + let mut payload = PayloadBuffer::new(payload); + + assert_eq!(None, payload.read_exact(2)); + + 
sender.feed_data(Bytes::from("line1")); + sender.feed_data(Bytes::from("line2")); + lazy(|cx| payload.poll_stream(cx)).await.unwrap(); + + assert_eq!(Some(Bytes::from_static(b"li")), payload.read_exact(2)); + assert_eq!(payload.buf.len(), 8); + + assert_eq!(Some(Bytes::from_static(b"ne1l")), payload.read_exact(4)); + assert_eq!(payload.buf.len(), 4); + } + + #[actix_rt::test] + async fn read_until() { + let (mut sender, payload) = h1::Payload::create(false); + let mut payload = PayloadBuffer::new(payload); + + assert_eq!(None, payload.read_until(b"ne").unwrap()); + + sender.feed_data(Bytes::from("line1")); + sender.feed_data(Bytes::from("line2")); + lazy(|cx| payload.poll_stream(cx)).await.unwrap(); + + assert_eq!( + Some(Bytes::from("line")), + payload.read_until(b"ne").unwrap() + ); + assert_eq!(payload.buf.len(), 6); + + assert_eq!( + Some(Bytes::from("1line2")), + payload.read_until(b"2").unwrap() + ); + assert_eq!(payload.buf.len(), 0); + } +} diff --git a/actix-multipart/src/safety.rs b/actix-multipart/src/safety.rs new file mode 100644 index 000000000..db6b3b18b --- /dev/null +++ b/actix-multipart/src/safety.rs @@ -0,0 +1,60 @@ +use std::{cell::Cell, marker::PhantomData, rc::Rc, task}; + +use local_waker::LocalWaker; + +/// Counter. It tracks of number of clones of payloads and give access to payload only to top most. +/// +/// - When dropped, parent task is awakened. This is to support the case where `Field` is dropped in +/// a separate task than `Multipart`. +/// - Assumes that parent owners don't move to different tasks; only the top-most is allowed to. +/// - If dropped and is not top most owner, is_clean flag is set to false. +#[derive(Debug)] +pub(crate) struct Safety { + task: LocalWaker, + level: usize, + payload: Rc>, + clean: Rc>, +} + +impl Safety { + pub(crate) fn new() -> Safety { + let payload = Rc::new(PhantomData); + Safety { + task: LocalWaker::new(), + level: Rc::strong_count(&payload), + clean: Rc::new(Cell::new(true)), + payload, + } + } + + pub(crate) fn current(&self) -> bool { + Rc::strong_count(&self.payload) == self.level && self.clean.get() + } + + pub(crate) fn is_clean(&self) -> bool { + self.clean.get() + } + + pub(crate) fn clone(&self, cx: &task::Context<'_>) -> Safety { + let payload = Rc::clone(&self.payload); + let s = Safety { + task: LocalWaker::new(), + level: Rc::strong_count(&payload), + clean: self.clean.clone(), + payload, + }; + s.task.register(cx.waker()); + s + } +} + +impl Drop for Safety { + fn drop(&mut self) { + if Rc::strong_count(&self.payload) != self.level { + // Multipart dropped leaving a Field + self.clean.set(false); + } + + self.task.wake(); + } +} diff --git a/actix-multipart/src/server.rs b/actix-multipart/src/server.rs deleted file mode 100644 index c08031eba..000000000 --- a/actix-multipart/src/server.rs +++ /dev/null @@ -1,1339 +0,0 @@ -//! Multipart response payload support. - -use std::{ - cell::{Cell, RefCell, RefMut}, - cmp, fmt, - marker::PhantomData, - pin::Pin, - rc::Rc, - task::{Context, Poll}, -}; - -use actix_web::{ - error::{ParseError, PayloadError}, - http::header::{self, ContentDisposition, HeaderMap, HeaderName, HeaderValue}, -}; -use bytes::{Bytes, BytesMut}; -use futures_core::stream::{LocalBoxStream, Stream}; -use local_waker::LocalWaker; - -use crate::error::MultipartError; - -const MAX_HEADERS: usize = 32; - -/// The server-side implementation of `multipart/form-data` requests. -/// -/// This will parse the incoming stream into `MultipartItem` instances via its -/// Stream implementation. 
-/// `MultipartItem::Field` contains multipart field. `MultipartItem::Multipart` -/// is used for nested multipart streams. -pub struct Multipart { - safety: Safety, - error: Option, - inner: Option, -} - -enum InnerMultipartItem { - None, - Field(Rc>), -} - -#[derive(PartialEq, Debug)] -enum InnerState { - /// Stream eof - Eof, - - /// Skip data until first boundary - FirstBoundary, - - /// Reading boundary - Boundary, - - /// Reading Headers, - Headers, -} - -struct InnerMultipart { - payload: PayloadRef, - boundary: String, - state: InnerState, - item: InnerMultipartItem, -} - -impl Multipart { - /// Create multipart instance for boundary. - pub fn new(headers: &HeaderMap, stream: S) -> Multipart - where - S: Stream> + 'static, - { - match Self::boundary(headers) { - Ok(boundary) => Multipart::from_boundary(boundary, stream), - Err(err) => Multipart::from_error(err), - } - } - - /// Extract boundary info from headers. - pub(crate) fn boundary(headers: &HeaderMap) -> Result { - headers - .get(&header::CONTENT_TYPE) - .ok_or(MultipartError::NoContentType)? - .to_str() - .ok() - .and_then(|content_type| content_type.parse::().ok()) - .ok_or(MultipartError::ParseContentType)? - .get_param(mime::BOUNDARY) - .map(|boundary| boundary.as_str().to_owned()) - .ok_or(MultipartError::Boundary) - } - - /// Create multipart instance for given boundary and stream - pub(crate) fn from_boundary(boundary: String, stream: S) -> Multipart - where - S: Stream> + 'static, - { - Multipart { - error: None, - safety: Safety::new(), - inner: Some(InnerMultipart { - boundary, - payload: PayloadRef::new(PayloadBuffer::new(stream)), - state: InnerState::FirstBoundary, - item: InnerMultipartItem::None, - }), - } - } - - /// Create Multipart instance from MultipartError - pub(crate) fn from_error(err: MultipartError) -> Multipart { - Multipart { - error: Some(err), - safety: Safety::new(), - inner: None, - } - } -} - -impl Stream for Multipart { - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.get_mut(); - - match this.inner.as_mut() { - Some(inner) => { - if let Some(mut buffer) = inner.payload.get_mut(&this.safety) { - // check safety and poll read payload to buffer. - buffer.poll_stream(cx)?; - } else if !this.safety.is_clean() { - // safety violation - return Poll::Ready(Some(Err(MultipartError::NotConsumed))); - } else { - return Poll::Pending; - } - - inner.poll(&this.safety, cx) - } - None => Poll::Ready(Some(Err(this - .error - .take() - .expect("Multipart polled after finish")))), - } - } -} - -impl InnerMultipart { - fn read_headers(payload: &mut PayloadBuffer) -> Result, MultipartError> { - match payload.read_until(b"\r\n\r\n")? 
{ - None => { - if payload.eof { - Err(MultipartError::Incomplete) - } else { - Ok(None) - } - } - Some(bytes) => { - let mut hdrs = [httparse::EMPTY_HEADER; MAX_HEADERS]; - match httparse::parse_headers(&bytes, &mut hdrs) { - Ok(httparse::Status::Complete((_, hdrs))) => { - // convert headers - let mut headers = HeaderMap::with_capacity(hdrs.len()); - - for h in hdrs { - let name = - HeaderName::try_from(h.name).map_err(|_| ParseError::Header)?; - let value = - HeaderValue::try_from(h.value).map_err(|_| ParseError::Header)?; - headers.append(name, value); - } - - Ok(Some(headers)) - } - Ok(httparse::Status::Partial) => Err(ParseError::Header.into()), - Err(err) => Err(ParseError::from(err).into()), - } - } - } - } - - fn read_boundary( - payload: &mut PayloadBuffer, - boundary: &str, - ) -> Result, MultipartError> { - // TODO: need to read epilogue - match payload.readline_or_eof()? { - None => { - if payload.eof { - Ok(Some(true)) - } else { - Ok(None) - } - } - Some(chunk) => { - if chunk.len() < boundary.len() + 4 - || &chunk[..2] != b"--" - || &chunk[2..boundary.len() + 2] != boundary.as_bytes() - { - Err(MultipartError::Boundary) - } else if &chunk[boundary.len() + 2..] == b"\r\n" { - Ok(Some(false)) - } else if &chunk[boundary.len() + 2..boundary.len() + 4] == b"--" - && (chunk.len() == boundary.len() + 4 - || &chunk[boundary.len() + 4..] == b"\r\n") - { - Ok(Some(true)) - } else { - Err(MultipartError::Boundary) - } - } - } - } - - fn skip_until_boundary( - payload: &mut PayloadBuffer, - boundary: &str, - ) -> Result, MultipartError> { - let mut eof = false; - loop { - match payload.readline()? { - Some(chunk) => { - if chunk.is_empty() { - return Err(MultipartError::Boundary); - } - if chunk.len() < boundary.len() { - continue; - } - if &chunk[..2] == b"--" && &chunk[2..chunk.len() - 2] == boundary.as_bytes() { - break; - } else { - if chunk.len() < boundary.len() + 2 { - continue; - } - let b: &[u8] = boundary.as_ref(); - if &chunk[..boundary.len()] == b - && &chunk[boundary.len()..boundary.len() + 2] == b"--" - { - eof = true; - break; - } - } - } - None => { - return if payload.eof { - Err(MultipartError::Incomplete) - } else { - Ok(None) - }; - } - } - } - Ok(Some(eof)) - } - - fn poll( - &mut self, - safety: &Safety, - cx: &Context<'_>, - ) -> Poll>> { - if self.state == InnerState::Eof { - Poll::Ready(None) - } else { - // release field - loop { - // Nested multipart streams of fields has to be consumed - // before switching to next - if safety.current() { - let stop = match self.item { - InnerMultipartItem::Field(ref mut field) => { - match field.borrow_mut().poll(safety) { - Poll::Pending => return Poll::Pending, - Poll::Ready(Some(Ok(_))) => continue, - Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err))), - Poll::Ready(None) => true, - } - } - InnerMultipartItem::None => false, - }; - if stop { - self.item = InnerMultipartItem::None; - } - if let InnerMultipartItem::None = self.item { - break; - } - } - } - - let headers = if let Some(mut payload) = self.payload.get_mut(safety) { - match self.state { - // read until first boundary - InnerState::FirstBoundary => { - match InnerMultipart::skip_until_boundary(&mut payload, &self.boundary)? { - Some(eof) => { - if eof { - self.state = InnerState::Eof; - return Poll::Ready(None); - } else { - self.state = InnerState::Headers; - } - } - None => return Poll::Pending, - } - } - // read boundary - InnerState::Boundary => { - match InnerMultipart::read_boundary(&mut payload, &self.boundary)? 
{ - None => return Poll::Pending, - Some(eof) => { - if eof { - self.state = InnerState::Eof; - return Poll::Ready(None); - } else { - self.state = InnerState::Headers; - } - } - } - } - _ => {} - } - - // read field headers for next field - if self.state == InnerState::Headers { - if let Some(headers) = InnerMultipart::read_headers(&mut payload)? { - self.state = InnerState::Boundary; - headers - } else { - return Poll::Pending; - } - } else { - unreachable!() - } - } else { - log::debug!("NotReady: field is in flight"); - return Poll::Pending; - }; - - // According to RFC 7578 §4.2, a Content-Disposition header must always be present and - // set to "form-data". - - let content_disposition = headers - .get(&header::CONTENT_DISPOSITION) - .and_then(|cd| ContentDisposition::from_raw(cd).ok()) - .filter(|content_disposition| { - let is_form_data = - content_disposition.disposition == header::DispositionType::FormData; - - let has_field_name = content_disposition - .parameters - .iter() - .any(|param| matches!(param, header::DispositionParam::Name(_))); - - is_form_data && has_field_name - }); - - let cd = if let Some(content_disposition) = content_disposition { - content_disposition - } else { - return Poll::Ready(Some(Err(MultipartError::NoContentDisposition))); - }; - - let ct: Option = headers - .get(&header::CONTENT_TYPE) - .and_then(|ct| ct.to_str().ok()) - .and_then(|ct| ct.parse().ok()); - - self.state = InnerState::Boundary; - - // nested multipart stream is not supported - if let Some(mime) = &ct { - if mime.type_() == mime::MULTIPART { - return Poll::Ready(Some(Err(MultipartError::Nested))); - } - } - - let field = - InnerField::new_in_rc(self.payload.clone(), self.boundary.clone(), &headers)?; - - self.item = InnerMultipartItem::Field(Rc::clone(&field)); - - Poll::Ready(Some(Ok(Field::new( - safety.clone(cx), - headers, - ct, - cd, - field, - )))) - } - } -} - -impl Drop for InnerMultipart { - fn drop(&mut self) { - // InnerMultipartItem::Field has to be dropped first because of Safety. - self.item = InnerMultipartItem::None; - } -} - -/// A single field in a multipart stream -pub struct Field { - ct: Option, - cd: ContentDisposition, - headers: HeaderMap, - inner: Rc>, - safety: Safety, -} - -impl Field { - fn new( - safety: Safety, - headers: HeaderMap, - ct: Option, - cd: ContentDisposition, - inner: Rc>, - ) -> Self { - Field { - ct, - cd, - headers, - inner, - safety, - } - } - - /// Returns a reference to the field's header map. - pub fn headers(&self) -> &HeaderMap { - &self.headers - } - - /// Returns a reference to the field's content (mime) type, if it is supplied by the client. - /// - /// According to [RFC 7578](https://www.rfc-editor.org/rfc/rfc7578#section-4.4), if it is not - /// present, it should default to "text/plain". Note it is the responsibility of the client to - /// provide the appropriate content type, there is no attempt to validate this by the server. - pub fn content_type(&self) -> Option<&mime::Mime> { - self.ct.as_ref() - } - - /// Returns the field's Content-Disposition. - /// - /// Per [RFC 7578 §4.2]: "Each part MUST contain a Content-Disposition header field where the - /// disposition type is `form-data`. The Content-Disposition header field MUST also contain an - /// additional parameter of `name`; the value of the `name` parameter is the original field name - /// from the form." - /// - /// This crate validates that it exists before returning a `Field`. As such, it is safe to - /// unwrap `.content_disposition().get_name()`. 
The [name](Self::name) method is provided as - /// a convenience. - /// - /// [RFC 7578 §4.2]: https://datatracker.ietf.org/doc/html/rfc7578#section-4.2 - pub fn content_disposition(&self) -> &ContentDisposition { - &self.cd - } - - /// Returns the field's name. - /// - /// See [content_disposition](Self::content_disposition) regarding guarantees about existence of - /// the name field. - pub fn name(&self) -> &str { - self.content_disposition() - .get_name() - .expect("field name should be guaranteed to exist in multipart form-data") - } -} - -impl Stream for Field { - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.get_mut(); - let mut inner = this.inner.borrow_mut(); - if let Some(mut buffer) = inner.payload.as_ref().unwrap().get_mut(&this.safety) { - // check safety and poll read payload to buffer. - buffer.poll_stream(cx)?; - } else if !this.safety.is_clean() { - // safety violation - return Poll::Ready(Some(Err(MultipartError::NotConsumed))); - } else { - return Poll::Pending; - } - - inner.poll(&this.safety) - } -} - -impl fmt::Debug for Field { - fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { - if let Some(ct) = &self.ct { - writeln!(f, "\nField: {}", ct)?; - } else { - writeln!(f, "\nField:")?; - } - writeln!(f, " boundary: {}", self.inner.borrow().boundary)?; - writeln!(f, " headers:")?; - for (key, val) in self.headers.iter() { - writeln!(f, " {:?}: {:?}", key, val)?; - } - Ok(()) - } -} - -struct InnerField { - payload: Option, - boundary: String, - eof: bool, - length: Option, -} - -impl InnerField { - fn new_in_rc( - payload: PayloadRef, - boundary: String, - headers: &HeaderMap, - ) -> Result>, PayloadError> { - Self::new(payload, boundary, headers).map(|this| Rc::new(RefCell::new(this))) - } - - fn new( - payload: PayloadRef, - boundary: String, - headers: &HeaderMap, - ) -> Result { - let len = if let Some(len) = headers.get(&header::CONTENT_LENGTH) { - match len.to_str().ok().and_then(|len| len.parse::().ok()) { - Some(len) => Some(len), - None => return Err(PayloadError::Incomplete(None)), - } - } else { - None - }; - - Ok(InnerField { - boundary, - payload: Some(payload), - eof: false, - length: len, - }) - } - - /// Reads body part content chunk of the specified size. - /// The body part must has `Content-Length` header with proper value. - fn read_len( - payload: &mut PayloadBuffer, - size: &mut u64, - ) -> Poll>> { - if *size == 0 { - Poll::Ready(None) - } else { - match payload.read_max(*size)? { - Some(mut chunk) => { - let len = cmp::min(chunk.len() as u64, *size); - *size -= len; - let ch = chunk.split_to(len as usize); - if !chunk.is_empty() { - payload.unprocessed(chunk); - } - Poll::Ready(Some(Ok(ch))) - } - None => { - if payload.eof && (*size != 0) { - Poll::Ready(Some(Err(MultipartError::Incomplete))) - } else { - Poll::Pending - } - } - } - } - } - - /// Reads content chunk of body part with unknown length. - /// The `Content-Length` header for body part is not necessary. 
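// A simplified, standalone sketch (not part of this diff) of the scan performed by
// `read_stream` below: field bytes are emitted up to the CRLF that starts the next
// "--boundary" line, and a short tail is retained in case the delimiter straddles chunks.
// `split_field_chunk` is a hypothetical name.
fn split_field_chunk(buf: &mut bytes::BytesMut, boundary: &str) -> Option<bytes::Bytes> {
    let delim = format!("\r\n--{boundary}");

    match memchr::memmem::find(&buf[..], delim.as_bytes()) {
        // delimiter starts the buffer: the field has no more data
        Some(0) => None,
        // delimiter found: everything before it belongs to the field
        Some(idx) => Some(buf.split_to(idx).freeze()),
        // no delimiter yet: emit only what cannot possibly be part of it
        None => {
            let keep = delim.len().saturating_sub(1).min(buf.len());
            let emit = buf.len() - keep;
            (emit > 0).then(|| buf.split_to(emit).freeze())
        }
    }
}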
- fn read_stream( - payload: &mut PayloadBuffer, - boundary: &str, - ) -> Poll>> { - let mut pos = 0; - - let len = payload.buf.len(); - if len == 0 { - return if payload.eof { - Poll::Ready(Some(Err(MultipartError::Incomplete))) - } else { - Poll::Pending - }; - } - - // check boundary - if len > 4 && payload.buf[0] == b'\r' { - let b_len = if &payload.buf[..2] == b"\r\n" && &payload.buf[2..4] == b"--" { - Some(4) - } else if &payload.buf[1..3] == b"--" { - Some(3) - } else { - None - }; - - if let Some(b_len) = b_len { - let b_size = boundary.len() + b_len; - if len < b_size { - return Poll::Pending; - } else if &payload.buf[b_len..b_size] == boundary.as_bytes() { - // found boundary - return Poll::Ready(None); - } - } - } - - loop { - return if let Some(idx) = memchr::memmem::find(&payload.buf[pos..], b"\r") { - let cur = pos + idx; - - // check if we have enough data for boundary detection - if cur + 4 > len { - if cur > 0 { - Poll::Ready(Some(Ok(payload.buf.split_to(cur).freeze()))) - } else { - Poll::Pending - } - } else { - // check boundary - if (&payload.buf[cur..cur + 2] == b"\r\n" - && &payload.buf[cur + 2..cur + 4] == b"--") - || (&payload.buf[cur..=cur] == b"\r" - && &payload.buf[cur + 1..cur + 3] == b"--") - { - if cur != 0 { - // return buffer - Poll::Ready(Some(Ok(payload.buf.split_to(cur).freeze()))) - } else { - pos = cur + 1; - continue; - } - } else { - // not boundary - pos = cur + 1; - continue; - } - } - } else { - Poll::Ready(Some(Ok(payload.buf.split().freeze()))) - }; - } - } - - fn poll(&mut self, s: &Safety) -> Poll>> { - if self.payload.is_none() { - return Poll::Ready(None); - } - - let result = if let Some(mut payload) = self.payload.as_ref().unwrap().get_mut(s) { - if !self.eof { - let res = if let Some(ref mut len) = self.length { - InnerField::read_len(&mut payload, len) - } else { - InnerField::read_stream(&mut payload, &self.boundary) - }; - - match res { - Poll::Pending => return Poll::Pending, - Poll::Ready(Some(Ok(bytes))) => return Poll::Ready(Some(Ok(bytes))), - Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err))), - Poll::Ready(None) => self.eof = true, - } - } - - match payload.readline() { - Ok(None) => Poll::Pending, - Ok(Some(line)) => { - if line.as_ref() != b"\r\n" { - log::warn!("multipart field did not read all the data or it is malformed"); - } - Poll::Ready(None) - } - Err(err) => Poll::Ready(Some(Err(err))), - } - } else { - Poll::Pending - }; - - if let Poll::Ready(None) = result { - self.payload.take(); - } - result - } -} - -struct PayloadRef { - payload: Rc>, -} - -impl PayloadRef { - fn new(payload: PayloadBuffer) -> PayloadRef { - PayloadRef { - payload: Rc::new(payload.into()), - } - } - - fn get_mut(&self, s: &Safety) -> Option> { - if s.current() { - Some(self.payload.borrow_mut()) - } else { - None - } - } -} - -impl Clone for PayloadRef { - fn clone(&self) -> PayloadRef { - PayloadRef { - payload: Rc::clone(&self.payload), - } - } -} - -/// Counter. It tracks of number of clones of payloads and give access to payload only to top most. -/// * When dropped, parent task is awakened. This is to support the case where Field is -/// dropped in a separate task than Multipart. -/// * Assumes that parent owners don't move to different tasks; only the top-most is allowed to. -/// * If dropped and is not top most owner, is_clean flag is set to false. 
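// A standalone sketch (not part of this diff) of the ownership-level check this counter relies
// on, which is kept by the new `safety.rs` earlier in this diff: each handle records the Rc
// strong count at creation, so only the most recently created live handle observes a matching
// count. `Level` is a hypothetical name.
use std::{marker::PhantomData, rc::Rc};

struct Level {
    token: Rc<PhantomData<()>>,
    level: usize,
}

impl Level {
    fn new() -> Self {
        let token = Rc::new(PhantomData);
        Self { level: Rc::strong_count(&token), token }
    }

    /// Creates a child handle; the parent stops being "current" until the child is dropped.
    fn child(&self) -> Self {
        let token = Rc::clone(&self.token);
        Self { level: Rc::strong_count(&token), token }
    }

    fn current(&self) -> bool {
        Rc::strong_count(&self.token) == self.level
    }
}

fn main() {
    let root = Level::new();
    assert!(root.current());

    let field = root.child();
    assert!(field.current());
    assert!(!root.current()); // parent yields access while the child exists

    drop(field);
    assert!(root.current()); // access returns to the parent
}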
-#[derive(Debug)] -struct Safety { - task: LocalWaker, - level: usize, - payload: Rc>, - clean: Rc>, -} - -impl Safety { - fn new() -> Safety { - let payload = Rc::new(PhantomData); - Safety { - task: LocalWaker::new(), - level: Rc::strong_count(&payload), - clean: Rc::new(Cell::new(true)), - payload, - } - } - - fn current(&self) -> bool { - Rc::strong_count(&self.payload) == self.level && self.clean.get() - } - - fn is_clean(&self) -> bool { - self.clean.get() - } - - fn clone(&self, cx: &Context<'_>) -> Safety { - let payload = Rc::clone(&self.payload); - let s = Safety { - task: LocalWaker::new(), - level: Rc::strong_count(&payload), - clean: self.clean.clone(), - payload, - }; - s.task.register(cx.waker()); - s - } -} - -impl Drop for Safety { - fn drop(&mut self) { - if Rc::strong_count(&self.payload) != self.level { - // Multipart dropped leaving a Field - self.clean.set(false); - } - - self.task.wake(); - } -} - -/// Payload buffer. -struct PayloadBuffer { - eof: bool, - buf: BytesMut, - stream: LocalBoxStream<'static, Result>, -} - -impl PayloadBuffer { - /// Constructs new `PayloadBuffer` instance. - fn new(stream: S) -> Self - where - S: Stream> + 'static, - { - PayloadBuffer { - eof: false, - buf: BytesMut::new(), - stream: Box::pin(stream), - } - } - - fn poll_stream(&mut self, cx: &mut Context<'_>) -> Result<(), PayloadError> { - loop { - match Pin::new(&mut self.stream).poll_next(cx) { - Poll::Ready(Some(Ok(data))) => self.buf.extend_from_slice(&data), - Poll::Ready(Some(Err(err))) => return Err(err), - Poll::Ready(None) => { - self.eof = true; - return Ok(()); - } - Poll::Pending => return Ok(()), - } - } - } - - /// Read exact number of bytes - #[cfg(test)] - fn read_exact(&mut self, size: usize) -> Option { - if size <= self.buf.len() { - Some(self.buf.split_to(size).freeze()) - } else { - None - } - } - - fn read_max(&mut self, size: u64) -> Result, MultipartError> { - if !self.buf.is_empty() { - let size = std::cmp::min(self.buf.len() as u64, size) as usize; - Ok(Some(self.buf.split_to(size).freeze())) - } else if self.eof { - Err(MultipartError::Incomplete) - } else { - Ok(None) - } - } - - /// Read until specified ending - fn read_until(&mut self, line: &[u8]) -> Result, MultipartError> { - let res = memchr::memmem::find(&self.buf, line) - .map(|idx| self.buf.split_to(idx + line.len()).freeze()); - - if res.is_none() && self.eof { - Err(MultipartError::Incomplete) - } else { - Ok(res) - } - } - - /// Read bytes until new line delimiter - fn readline(&mut self) -> Result, MultipartError> { - self.read_until(b"\n") - } - - /// Read bytes until new line delimiter or eof - fn readline_or_eof(&mut self) -> Result, MultipartError> { - match self.readline() { - Err(MultipartError::Incomplete) if self.eof => Ok(Some(self.buf.split().freeze())), - line => line, - } - } - - /// Put unprocessed data back to the buffer - fn unprocessed(&mut self, data: Bytes) { - let buf = BytesMut::from(data.as_ref()); - let buf = std::mem::replace(&mut self.buf, buf); - self.buf.extend_from_slice(&buf); - } -} - -#[cfg(test)] -mod tests { - use std::time::Duration; - - use actix_http::h1; - use actix_web::{ - http::header::{DispositionParam, DispositionType}, - rt, - test::TestRequest, - FromRequest, - }; - use bytes::Bytes; - use futures_util::{future::lazy, StreamExt as _}; - use tokio::sync::mpsc; - use tokio_stream::wrappers::UnboundedReceiverStream; - - use super::*; - - #[actix_rt::test] - async fn test_boundary() { - let headers = HeaderMap::new(); - match Multipart::boundary(&headers) { 
- Err(MultipartError::NoContentType) => {} - _ => unreachable!("should not happen"), - } - - let mut headers = HeaderMap::new(); - headers.insert( - header::CONTENT_TYPE, - header::HeaderValue::from_static("test"), - ); - - match Multipart::boundary(&headers) { - Err(MultipartError::ParseContentType) => {} - _ => unreachable!("should not happen"), - } - - let mut headers = HeaderMap::new(); - headers.insert( - header::CONTENT_TYPE, - header::HeaderValue::from_static("multipart/mixed"), - ); - match Multipart::boundary(&headers) { - Err(MultipartError::Boundary) => {} - _ => unreachable!("should not happen"), - } - - let mut headers = HeaderMap::new(); - headers.insert( - header::CONTENT_TYPE, - header::HeaderValue::from_static( - "multipart/mixed; boundary=\"5c02368e880e436dab70ed54e1c58209\"", - ), - ); - - assert_eq!( - Multipart::boundary(&headers).unwrap(), - "5c02368e880e436dab70ed54e1c58209" - ); - } - - fn create_stream() -> ( - mpsc::UnboundedSender>, - impl Stream>, - ) { - let (tx, rx) = mpsc::unbounded_channel(); - - ( - tx, - UnboundedReceiverStream::new(rx).map(|res| res.map_err(|_| panic!())), - ) - } - - // Stream that returns from a Bytes, one char at a time and Pending every other poll() - struct SlowStream { - bytes: Bytes, - pos: usize, - ready: bool, - } - - impl SlowStream { - fn new(bytes: Bytes) -> SlowStream { - SlowStream { - bytes, - pos: 0, - ready: false, - } - } - } - - impl Stream for SlowStream { - type Item = Result; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - let this = self.get_mut(); - if !this.ready { - this.ready = true; - cx.waker().wake_by_ref(); - return Poll::Pending; - } - - if this.pos == this.bytes.len() { - return Poll::Ready(None); - } - - let res = Poll::Ready(Some(Ok(this.bytes.slice(this.pos..(this.pos + 1))))); - this.pos += 1; - this.ready = false; - res - } - } - - fn create_simple_request_with_header() -> (Bytes, HeaderMap) { - let bytes = Bytes::from( - "testasdadsad\r\n\ - --abbc761f78ff4d7cb7573b5a23f96ef0\r\n\ - Content-Disposition: form-data; name=\"file\"; filename=\"fn.txt\"\r\n\ - Content-Type: text/plain; charset=utf-8\r\nContent-Length: 4\r\n\r\n\ - test\r\n\ - --abbc761f78ff4d7cb7573b5a23f96ef0\r\n\ - Content-Disposition: form-data; name=\"file\"; filename=\"fn.txt\"\r\n\ - Content-Type: text/plain; charset=utf-8\r\nContent-Length: 4\r\n\r\n\ - data\r\n\ - --abbc761f78ff4d7cb7573b5a23f96ef0--\r\n", - ); - let mut headers = HeaderMap::new(); - headers.insert( - header::CONTENT_TYPE, - header::HeaderValue::from_static( - "multipart/mixed; boundary=\"abbc761f78ff4d7cb7573b5a23f96ef0\"", - ), - ); - (bytes, headers) - } - - #[actix_rt::test] - async fn test_multipart_no_end_crlf() { - let (sender, payload) = create_stream(); - let (mut bytes, headers) = create_simple_request_with_header(); - let bytes_stripped = bytes.split_to(bytes.len()); // strip crlf - - sender.send(Ok(bytes_stripped)).unwrap(); - drop(sender); // eof - - let mut multipart = Multipart::new(&headers, payload); - - match multipart.next().await.unwrap() { - Ok(_) => {} - _ => unreachable!(), - } - - match multipart.next().await.unwrap() { - Ok(_) => {} - _ => unreachable!(), - } - - match multipart.next().await { - None => {} - _ => unreachable!(), - } - } - - #[actix_rt::test] - async fn test_multipart() { - let (sender, payload) = create_stream(); - let (bytes, headers) = create_simple_request_with_header(); - - sender.send(Ok(bytes)).unwrap(); - - let mut multipart = Multipart::new(&headers, payload); - match 
multipart.next().await { - Some(Ok(mut field)) => { - let cd = field.content_disposition(); - assert_eq!(cd.disposition, DispositionType::FormData); - assert_eq!(cd.parameters[0], DispositionParam::Name("file".into())); - - assert_eq!(field.content_type().unwrap().type_(), mime::TEXT); - assert_eq!(field.content_type().unwrap().subtype(), mime::PLAIN); - - match field.next().await.unwrap() { - Ok(chunk) => assert_eq!(chunk, "test"), - _ => unreachable!(), - } - match field.next().await { - None => {} - _ => unreachable!(), - } - } - _ => unreachable!(), - } - - match multipart.next().await.unwrap() { - Ok(mut field) => { - assert_eq!(field.content_type().unwrap().type_(), mime::TEXT); - assert_eq!(field.content_type().unwrap().subtype(), mime::PLAIN); - - match field.next().await { - Some(Ok(chunk)) => assert_eq!(chunk, "data"), - _ => unreachable!(), - } - match field.next().await { - None => {} - _ => unreachable!(), - } - } - _ => unreachable!(), - } - - match multipart.next().await { - None => {} - _ => unreachable!(), - } - } - - // Loops, collecting all bytes until end-of-field - async fn get_whole_field(field: &mut Field) -> BytesMut { - let mut b = BytesMut::new(); - loop { - match field.next().await { - Some(Ok(chunk)) => b.extend_from_slice(&chunk), - None => return b, - _ => unreachable!(), - } - } - } - - #[actix_rt::test] - async fn test_stream() { - let (bytes, headers) = create_simple_request_with_header(); - let payload = SlowStream::new(bytes); - - let mut multipart = Multipart::new(&headers, payload); - match multipart.next().await.unwrap() { - Ok(mut field) => { - let cd = field.content_disposition(); - assert_eq!(cd.disposition, DispositionType::FormData); - assert_eq!(cd.parameters[0], DispositionParam::Name("file".into())); - - assert_eq!(field.content_type().unwrap().type_(), mime::TEXT); - assert_eq!(field.content_type().unwrap().subtype(), mime::PLAIN); - - assert_eq!(get_whole_field(&mut field).await, "test"); - } - _ => unreachable!(), - } - - match multipart.next().await { - Some(Ok(mut field)) => { - assert_eq!(field.content_type().unwrap().type_(), mime::TEXT); - assert_eq!(field.content_type().unwrap().subtype(), mime::PLAIN); - - assert_eq!(get_whole_field(&mut field).await, "data"); - } - _ => unreachable!(), - } - - match multipart.next().await { - None => {} - _ => unreachable!(), - } - } - - #[actix_rt::test] - async fn test_basic() { - let (_, payload) = h1::Payload::create(false); - let mut payload = PayloadBuffer::new(payload); - - assert_eq!(payload.buf.len(), 0); - lazy(|cx| payload.poll_stream(cx)).await.unwrap(); - assert_eq!(None, payload.read_max(1).unwrap()); - } - - #[actix_rt::test] - async fn test_eof() { - let (mut sender, payload) = h1::Payload::create(false); - let mut payload = PayloadBuffer::new(payload); - - assert_eq!(None, payload.read_max(4).unwrap()); - sender.feed_data(Bytes::from("data")); - sender.feed_eof(); - lazy(|cx| payload.poll_stream(cx)).await.unwrap(); - - assert_eq!(Some(Bytes::from("data")), payload.read_max(4).unwrap()); - assert_eq!(payload.buf.len(), 0); - assert!(payload.read_max(1).is_err()); - assert!(payload.eof); - } - - #[actix_rt::test] - async fn test_err() { - let (mut sender, payload) = h1::Payload::create(false); - let mut payload = PayloadBuffer::new(payload); - assert_eq!(None, payload.read_max(1).unwrap()); - sender.set_error(PayloadError::Incomplete(None)); - lazy(|cx| payload.poll_stream(cx)).await.err().unwrap(); - } - - #[actix_rt::test] - async fn test_readmax() { - let (mut sender, payload) = 
h1::Payload::create(false); - let mut payload = PayloadBuffer::new(payload); - - sender.feed_data(Bytes::from("line1")); - sender.feed_data(Bytes::from("line2")); - lazy(|cx| payload.poll_stream(cx)).await.unwrap(); - assert_eq!(payload.buf.len(), 10); - - assert_eq!(Some(Bytes::from("line1")), payload.read_max(5).unwrap()); - assert_eq!(payload.buf.len(), 5); - - assert_eq!(Some(Bytes::from("line2")), payload.read_max(5).unwrap()); - assert_eq!(payload.buf.len(), 0); - } - - #[actix_rt::test] - async fn test_readexactly() { - let (mut sender, payload) = h1::Payload::create(false); - let mut payload = PayloadBuffer::new(payload); - - assert_eq!(None, payload.read_exact(2)); - - sender.feed_data(Bytes::from("line1")); - sender.feed_data(Bytes::from("line2")); - lazy(|cx| payload.poll_stream(cx)).await.unwrap(); - - assert_eq!(Some(Bytes::from_static(b"li")), payload.read_exact(2)); - assert_eq!(payload.buf.len(), 8); - - assert_eq!(Some(Bytes::from_static(b"ne1l")), payload.read_exact(4)); - assert_eq!(payload.buf.len(), 4); - } - - #[actix_rt::test] - async fn test_readuntil() { - let (mut sender, payload) = h1::Payload::create(false); - let mut payload = PayloadBuffer::new(payload); - - assert_eq!(None, payload.read_until(b"ne").unwrap()); - - sender.feed_data(Bytes::from("line1")); - sender.feed_data(Bytes::from("line2")); - lazy(|cx| payload.poll_stream(cx)).await.unwrap(); - - assert_eq!( - Some(Bytes::from("line")), - payload.read_until(b"ne").unwrap() - ); - assert_eq!(payload.buf.len(), 6); - - assert_eq!( - Some(Bytes::from("1line2")), - payload.read_until(b"2").unwrap() - ); - assert_eq!(payload.buf.len(), 0); - } - - #[actix_rt::test] - async fn test_multipart_from_error() { - let err = MultipartError::NoContentType; - let mut multipart = Multipart::from_error(err); - assert!(multipart.next().await.unwrap().is_err()) - } - - #[actix_rt::test] - async fn test_multipart_from_boundary() { - let (_, payload) = create_stream(); - let (_, headers) = create_simple_request_with_header(); - let boundary = Multipart::boundary(&headers); - assert!(boundary.is_ok()); - let _ = Multipart::from_boundary(boundary.unwrap(), payload); - } - - #[actix_rt::test] - async fn test_multipart_payload_consumption() { - // with sample payload and HttpRequest with no headers - let (_, inner_payload) = h1::Payload::create(false); - let mut payload = actix_web::dev::Payload::from(inner_payload); - let req = TestRequest::default().to_http_request(); - - // multipart should generate an error - let mut mp = Multipart::from_request(&req, &mut payload).await.unwrap(); - assert!(mp.next().await.unwrap().is_err()); - - // and should not consume the payload - match payload { - actix_web::dev::Payload::H1 { .. 
} => {} //expected - _ => unreachable!(), - } - } - - #[actix_rt::test] - async fn no_content_disposition() { - let bytes = Bytes::from( - "testasdadsad\r\n\ - --abbc761f78ff4d7cb7573b5a23f96ef0\r\n\ - Content-Type: text/plain; charset=utf-8\r\nContent-Length: 4\r\n\r\n\ - test\r\n\ - --abbc761f78ff4d7cb7573b5a23f96ef0\r\n", - ); - let mut headers = HeaderMap::new(); - headers.insert( - header::CONTENT_TYPE, - header::HeaderValue::from_static( - "multipart/mixed; boundary=\"abbc761f78ff4d7cb7573b5a23f96ef0\"", - ), - ); - let payload = SlowStream::new(bytes); - - let mut multipart = Multipart::new(&headers, payload); - let res = multipart.next().await.unwrap(); - assert!(res.is_err()); - assert!(matches!( - res.unwrap_err(), - MultipartError::NoContentDisposition, - )); - } - - #[actix_rt::test] - async fn no_name_in_content_disposition() { - let bytes = Bytes::from( - "testasdadsad\r\n\ - --abbc761f78ff4d7cb7573b5a23f96ef0\r\n\ - Content-Disposition: form-data; filename=\"fn.txt\"\r\n\ - Content-Type: text/plain; charset=utf-8\r\nContent-Length: 4\r\n\r\n\ - test\r\n\ - --abbc761f78ff4d7cb7573b5a23f96ef0\r\n", - ); - let mut headers = HeaderMap::new(); - headers.insert( - header::CONTENT_TYPE, - header::HeaderValue::from_static( - "multipart/mixed; boundary=\"abbc761f78ff4d7cb7573b5a23f96ef0\"", - ), - ); - let payload = SlowStream::new(bytes); - - let mut multipart = Multipart::new(&headers, payload); - let res = multipart.next().await.unwrap(); - assert!(res.is_err()); - assert!(matches!( - res.unwrap_err(), - MultipartError::NoContentDisposition, - )); - } - - #[actix_rt::test] - async fn test_drop_multipart_dont_hang() { - let (sender, payload) = create_stream(); - let (bytes, headers) = create_simple_request_with_header(); - sender.send(Ok(bytes)).unwrap(); - drop(sender); // eof - - let mut multipart = Multipart::new(&headers, payload); - let mut field = multipart.next().await.unwrap().unwrap(); - - drop(multipart); - - // should fail immediately - match field.next().await { - Some(Err(MultipartError::NotConsumed)) => {} - _ => panic!(), - }; - } - - #[actix_rt::test] - async fn test_drop_field_awaken_multipart() { - let (sender, payload) = create_stream(); - let (bytes, headers) = create_simple_request_with_header(); - sender.send(Ok(bytes)).unwrap(); - drop(sender); // eof - - let mut multipart = Multipart::new(&headers, payload); - let mut field = multipart.next().await.unwrap().unwrap(); - - let task = rt::spawn(async move { - rt::time::sleep(Duration::from_secs(1)).await; - assert_eq!(field.next().await.unwrap().unwrap(), "test"); - drop(field); - }); - - // dropping field should awaken current task - let _ = multipart.next().await.unwrap().unwrap(); - task.await.unwrap(); - } -} diff --git a/actix-multipart/src/test.rs b/actix-multipart/src/test.rs new file mode 100644 index 000000000..7dec85f8e --- /dev/null +++ b/actix-multipart/src/test.rs @@ -0,0 +1,220 @@ +//! Multipart testing utilities. + +use actix_web::{ + http::header::{self, HeaderMap}, + web::{BufMut as _, Bytes, BytesMut}, +}; +use mime::Mime; +use rand::{ + distributions::{Alphanumeric, DistString as _}, + thread_rng, +}; + +const CRLF: &[u8] = b"\r\n"; +const CRLF_CRLF: &[u8] = b"\r\n\r\n"; +const HYPHENS: &[u8] = b"--"; +const BOUNDARY_PREFIX: &str = "------------------------"; + +/// Constructs a `multipart/form-data` payload from bytes and metadata. +/// +/// Returned header map can be extended or merged with existing headers. +/// +/// Multipart boundary used is a random alphanumeric string. 
+/// +/// # Examples +/// +/// ``` +/// use actix_multipart::test::create_form_data_payload_and_headers; +/// use actix_web::{test::TestRequest, web::Bytes}; +/// use memchr::memmem::find; +/// +/// let (body, headers) = create_form_data_payload_and_headers( +/// "foo", +/// Some("lorem.txt".to_owned()), +/// Some(mime::TEXT_PLAIN_UTF_8), +/// Bytes::from_static(b"Lorem ipsum."), +/// ); +/// +/// assert!(find(&body, b"foo").is_some()); +/// assert!(find(&body, b"lorem.txt").is_some()); +/// assert!(find(&body, b"text/plain; charset=utf-8").is_some()); +/// assert!(find(&body, b"Lorem ipsum.").is_some()); +/// +/// let req = TestRequest::default(); +/// +/// // merge header map into existing test request and set multipart body +/// let req = headers +/// .into_iter() +/// .fold(req, |req, hdr| req.insert_header(hdr)) +/// .set_payload(body) +/// .to_http_request(); +/// +/// assert!( +/// req.headers() +/// .get("content-type") +/// .unwrap() +/// .to_str() +/// .unwrap() +/// .starts_with("multipart/form-data; boundary=\"") +/// ); +/// ``` +pub fn create_form_data_payload_and_headers( + name: &str, + filename: Option, + content_type: Option, + file: Bytes, +) -> (Bytes, HeaderMap) { + let boundary = Alphanumeric.sample_string(&mut thread_rng(), 32); + + create_form_data_payload_and_headers_with_boundary( + &boundary, + name, + filename, + content_type, + file, + ) +} + +/// Constructs a `multipart/form-data` payload from bytes and metadata with a fixed boundary. +/// +/// See [`create_form_data_payload_and_headers`] for more details. +pub fn create_form_data_payload_and_headers_with_boundary( + boundary: &str, + name: &str, + filename: Option, + content_type: Option, + file: Bytes, +) -> (Bytes, HeaderMap) { + let mut buf = BytesMut::with_capacity(file.len() + 128); + + let boundary_str = [BOUNDARY_PREFIX, boundary].concat(); + let boundary = boundary_str.as_bytes(); + + buf.put(HYPHENS); + buf.put(boundary); + buf.put(CRLF); + + buf.put(format!("Content-Disposition: form-data; name=\"{name}\"").as_bytes()); + if let Some(filename) = filename { + buf.put(format!("; filename=\"{filename}\"").as_bytes()); + } + buf.put(CRLF); + + if let Some(ct) = content_type { + buf.put(format!("Content-Type: {ct}").as_bytes()); + buf.put(CRLF); + } + + buf.put(format!("Content-Length: {}", file.len()).as_bytes()); + buf.put(CRLF_CRLF); + + buf.put(file); + buf.put(CRLF); + + buf.put(HYPHENS); + buf.put(boundary); + buf.put(HYPHENS); + buf.put(CRLF); + + let mut headers = HeaderMap::new(); + headers.insert( + header::CONTENT_TYPE, + format!("multipart/form-data; boundary=\"{boundary_str}\"") + .parse() + .unwrap(), + ); + + (buf.freeze(), headers) +} + +#[cfg(test)] +mod tests { + use std::convert::Infallible; + + use futures_util::stream; + + use super::*; + + fn find_boundary(headers: &HeaderMap) -> String { + headers + .get("content-type") + .unwrap() + .to_str() + .unwrap() + .parse::() + .unwrap() + .get_param(mime::BOUNDARY) + .unwrap() + .as_str() + .to_owned() + } + + #[test] + fn wire_format() { + let (pl, headers) = create_form_data_payload_and_headers_with_boundary( + "qWeRtYuIoP", + "foo", + None, + None, + Bytes::from_static(b"Lorem ipsum dolor\nsit ame."), + ); + + assert_eq!( + find_boundary(&headers), + "------------------------qWeRtYuIoP", + ); + + assert_eq!( + std::str::from_utf8(&pl).unwrap(), + "--------------------------qWeRtYuIoP\r\n\ + Content-Disposition: form-data; name=\"foo\"\r\n\ + Content-Length: 26\r\n\ + \r\n\ + Lorem ipsum dolor\n\ + sit ame.\r\n\ + 
--------------------------qWeRtYuIoP--\r\n", + ); + + let (pl, _headers) = create_form_data_payload_and_headers_with_boundary( + "qWeRtYuIoP", + "foo", + Some("Lorem.txt".to_owned()), + Some(mime::TEXT_PLAIN_UTF_8), + Bytes::from_static(b"Lorem ipsum dolor\nsit ame."), + ); + + assert_eq!( + std::str::from_utf8(&pl).unwrap(), + "--------------------------qWeRtYuIoP\r\n\ + Content-Disposition: form-data; name=\"foo\"; filename=\"Lorem.txt\"\r\n\ + Content-Type: text/plain; charset=utf-8\r\n\ + Content-Length: 26\r\n\ + \r\n\ + Lorem ipsum dolor\n\ + sit ame.\r\n\ + --------------------------qWeRtYuIoP--\r\n", + ); + } + + /// Test using an external library to prevent the two-wrongs-make-a-right class of errors. + #[actix_web::test] + async fn ecosystem_compat() { + let (pl, headers) = create_form_data_payload_and_headers( + "foo", + None, + None, + Bytes::from_static(b"Lorem ipsum dolor\nsit ame."), + ); + + let boundary = find_boundary(&headers); + + let pl = stream::once(async { Ok::<_, Infallible>(pl) }); + + let mut form = multer::Multipart::new(pl, boundary); + let field = form.next_field().await.unwrap().unwrap(); + assert_eq!(field.name().unwrap(), "foo"); + assert_eq!(field.file_name(), None); + assert_eq!(field.content_type(), None); + assert!(field.bytes().await.unwrap().starts_with(b"Lorem")); + } +} diff --git a/actix-router/CHANGES.md b/actix-router/CHANGES.md index 31316ff47..6305b45c3 100644 --- a/actix-router/CHANGES.md +++ b/actix-router/CHANGES.md @@ -2,6 +2,13 @@ ## Unreleased +## 0.5.3 + +- Add `unicode` crate feature (on-by-default) to switch between `regex` and `regex-lite` as a trade-off between full unicode support and binary size. +- Minimum supported Rust version (MSRV) is now 1.72. + +## 0.5.2 + - Minimum supported Rust version (MSRV) is now 1.68 due to transitive `time` dependency. 
## 0.5.1 diff --git a/actix-router/Cargo.toml b/actix-router/Cargo.toml index 8a404dd20..7def1bdb4 100644 --- a/actix-router/Cargo.toml +++ b/actix-router/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-router" -version = "0.5.1" +version = "0.5.3" authors = [ "Nikolay Kim ", "Ali MJ Al-Nasrawy ", @@ -12,17 +12,23 @@ repository = "https://github.com/actix/actix-web" license = "MIT OR Apache-2.0" edition = "2021" -[lib] -name = "actix_router" -path = "src/lib.rs" +[package.metadata.cargo_check_external_types] +allowed_external_types = [ + "http::*", + "serde::*", +] [features] -default = ["http"] +default = ["http", "unicode"] +http = ["dep:http"] +unicode = ["dep:regex"] [dependencies] bytestring = ">=0.1.5, <2" +cfg-if = "1" http = { version = "0.2.7", optional = true } -regex = "1.5" +regex = { version = "1.5", optional = true } +regex-lite = "0.1" serde = "1" tracing = { version = "0.1.30", default-features = false, features = ["log"] } @@ -32,9 +38,13 @@ http = "0.2.7" serde = { version = "1", features = ["derive"] } percent-encoding = "2.1" +[lints] +workspace = true + [[bench]] name = "router" harness = false +required-features = ["unicode"] [[bench]] name = "quoter" diff --git a/actix-router/README.md b/actix-router/README.md index 15a449c44..12d1b0146 100644 --- a/actix-router/README.md +++ b/actix-router/README.md @@ -1,14 +1,18 @@ # `actix-router` + + [![crates.io](https://img.shields.io/crates/v/actix-router?label=latest)](https://crates.io/crates/actix-router) -[![Documentation](https://docs.rs/actix-router/badge.svg?version=0.5.1)](https://docs.rs/actix-router/0.5.1) -![Version](https://img.shields.io/badge/rustc-1.68+-ab6000.svg) +[![Documentation](https://docs.rs/actix-router/badge.svg?version=0.5.3)](https://docs.rs/actix-router/0.5.3) +![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) ![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-router.svg)
-[![dependency status](https://deps.rs/crate/actix-router/0.5.1/status.svg)](https://deps.rs/crate/actix-router/0.5.1) +[![dependency status](https://deps.rs/crate/actix-router/0.5.3/status.svg)](https://deps.rs/crate/actix-router/0.5.3) [![Download](https://img.shields.io/crates/d/actix-router.svg)](https://crates.io/crates/actix-router) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) + + Resource path matching and router. diff --git a/actix-router/src/de.rs b/actix-router/src/de.rs index e8c7c658e..2f50619f8 100644 --- a/actix-router/src/de.rs +++ b/actix-router/src/de.rs @@ -500,10 +500,10 @@ impl<'de> de::VariantAccess<'de> for UnitVariant { #[cfg(test)] mod tests { - use serde::{de, Deserialize}; + use serde::Deserialize; use super::*; - use crate::{path::Path, router::Router, ResourceDef}; + use crate::{router::Router, ResourceDef}; #[derive(Deserialize)] struct MyStruct { @@ -511,11 +511,6 @@ mod tests { value: String, } - #[derive(Deserialize)] - struct Id { - _id: String, - } - #[derive(Debug, Deserialize)] struct Test1(String, u32); diff --git a/actix-router/src/lib.rs b/actix-router/src/lib.rs index f10093436..3f5e969e7 100644 --- a/actix-router/src/lib.rs +++ b/actix-router/src/lib.rs @@ -1,7 +1,5 @@ //! Resource path matching and router. -#![deny(rust_2018_idioms, nonstandard_style)] -#![warn(future_incompatible)] #![doc(html_logo_url = "https://actix.rs/img/logo.png")] #![doc(html_favicon_url = "https://actix.rs/favicon.ico")] #![cfg_attr(docsrs, feature(doc_auto_cfg))] @@ -10,6 +8,7 @@ mod de; mod path; mod pattern; mod quoter; +mod regex_set; mod resource; mod resource_path; mod router; diff --git a/actix-router/src/path.rs b/actix-router/src/path.rs index dc4150ddc..9031ab763 100644 --- a/actix-router/src/path.rs +++ b/actix-router/src/path.rs @@ -3,7 +3,7 @@ use std::{ ops::{DerefMut, Index}, }; -use serde::de; +use serde::{de, Deserialize}; use crate::{de::PathDeserializer, Resource, ResourcePath}; @@ -24,8 +24,13 @@ impl Default for PathItem { /// If resource path contains variable patterns, `Path` stores them. #[derive(Debug, Clone, Default)] pub struct Path { + /// Full path representation. path: T, + + /// Number of characters in `path` that have been processed into `segments`. pub(crate) skip: u16, + + /// List of processed dynamic segments; name->value pairs. pub(crate) segments: Vec<(Cow<'static, str>, PathItem)>, } @@ -83,8 +88,8 @@ impl Path { /// Set new path. #[inline] pub fn set(&mut self, path: T) { - self.skip = 0; self.path = path; + self.skip = 0; self.segments.clear(); } @@ -103,7 +108,7 @@ impl Path { pub(crate) fn add(&mut self, name: impl Into>, value: PathItem) { match value { - PathItem::Static(s) => self.segments.push((name.into(), PathItem::Static(s))), + PathItem::Static(seg) => self.segments.push((name.into(), PathItem::Static(seg))), PathItem::Segment(begin, end) => self.segments.push(( name.into(), PathItem::Segment(self.skip + begin, self.skip + end), @@ -149,15 +154,11 @@ impl Path { None } - /// Get matched parameter by name. + /// Returns matched parameter by name. /// /// If keyed parameter is not available empty string is used as default value. pub fn query(&self, key: &str) -> &str { - if let Some(s) = self.get(key) { - s - } else { - "" - } + self.get(key).unwrap_or_default() } /// Return iterator to items in parameter container. 
@@ -168,9 +169,13 @@ impl Path { } } - /// Try to deserialize matching parameters to a specified type `U` - pub fn load<'de, U: serde::Deserialize<'de>>(&'de self) -> Result { - de::Deserialize::deserialize(PathDeserializer::new(self)) + /// Deserializes matching parameters to a specified type `U`. + /// + /// # Errors + /// + /// Returns error when dynamic path segments cannot be deserialized into a `U` type. + pub fn load<'de, U: Deserialize<'de>>(&'de self) -> Result { + Deserialize::deserialize(PathDeserializer::new(self)) } } diff --git a/actix-router/src/regex_set.rs b/actix-router/src/regex_set.rs new file mode 100644 index 000000000..48f38df2c --- /dev/null +++ b/actix-router/src/regex_set.rs @@ -0,0 +1,66 @@ +//! Abstraction over `regex` and `regex-lite` depending on whether we have `unicode` crate feature +//! enabled. + +use cfg_if::cfg_if; +#[cfg(feature = "unicode")] +pub(crate) use regex::{escape, Regex}; +#[cfg(not(feature = "unicode"))] +pub(crate) use regex_lite::{escape, Regex}; + +#[cfg(feature = "unicode")] +#[derive(Debug, Clone)] +pub(crate) struct RegexSet(regex::RegexSet); + +#[cfg(not(feature = "unicode"))] +#[derive(Debug, Clone)] +pub(crate) struct RegexSet(Vec); + +impl RegexSet { + /// Create a new regex set. + /// + /// # Panics + /// + /// Panics if any path patterns are malformed. + pub(crate) fn new(re_set: Vec) -> Self { + cfg_if! { + if #[cfg(feature = "unicode")] { + Self(regex::RegexSet::new(re_set).unwrap()) + } else { + Self(re_set.iter().map(|re| Regex::new(re).unwrap()).collect()) + } + } + } + + /// Create a new empty regex set. + pub(crate) fn empty() -> Self { + cfg_if! { + if #[cfg(feature = "unicode")] { + Self(regex::RegexSet::empty()) + } else { + Self(Vec::new()) + } + } + } + + /// Returns true if regex set matches `path`. + pub(crate) fn is_match(&self, path: &str) -> bool { + cfg_if! { + if #[cfg(feature = "unicode")] { + self.0.is_match(path) + } else { + self.0.iter().any(|re| re.is_match(path)) + } + } + } + + /// Returns index within `path` of first match. + pub(crate) fn first_match_idx(&self, path: &str) -> Option { + cfg_if! { + if #[cfg(feature = "unicode")] { + self.0.matches(path).into_iter().next() + } else { + Some(self.0.iter().enumerate().find(|(_, re)| re.is_match(path))?.0) + } + } + } +} diff --git a/actix-router/src/resource.rs b/actix-router/src/resource.rs index abd132211..3a102945b 100644 --- a/actix-router/src/resource.rs +++ b/actix-router/src/resource.rs @@ -5,10 +5,13 @@ use std::{ mem, }; -use regex::{escape, Regex, RegexSet}; use tracing::error; -use crate::{path::PathItem, IntoPatterns, Patterns, Resource, ResourcePath}; +use crate::{ + path::PathItem, + regex_set::{escape, Regex, RegexSet}, + IntoPatterns, Patterns, Resource, ResourcePath, +}; const MAX_DYNAMIC_SEGMENTS: usize = 16; @@ -233,7 +236,7 @@ enum PatternSegment { Var(String), } -#[derive(Clone, Debug)] +#[derive(Debug, Clone)] #[allow(clippy::large_enum_variant)] enum PatternType { /// Single constant/literal segment. 
@@ -603,7 +606,7 @@ impl ResourceDef { PatternType::Dynamic(re, _) => Some(re.captures(path)?[1].len()), PatternType::DynamicSet(re, params) => { - let idx = re.matches(path).into_iter().next()?; + let idx = re.first_match_idx(path)?; let (ref pattern, _) = params[idx]; Some(pattern.captures(path)?[1].len()) } @@ -706,7 +709,7 @@ impl ResourceDef { PatternType::DynamicSet(re, params) => { let path = path.unprocessed(); - let (pattern, names) = match re.matches(path).into_iter().next() { + let (pattern, names) = match re.first_match_idx(path) { Some(idx) => ¶ms[idx], _ => return false, }; @@ -870,7 +873,7 @@ impl ResourceDef { } } - let pattern_re_set = RegexSet::new(re_set).unwrap(); + let pattern_re_set = RegexSet::new(re_set); let segments = segments.unwrap_or_default(); ( diff --git a/actix-test/CHANGES.md b/actix-test/CHANGES.md index a3ca7fe10..ec2dd6776 100644 --- a/actix-test/CHANGES.md +++ b/actix-test/CHANGES.md @@ -2,6 +2,21 @@ ## Unreleased +## 0.1.5 + +- Add `TestServerConfig::listen_address()` method. + +## 0.1.4 + +- Add `TestServerConfig::rustls_0_23()` method for Rustls v0.23 support behind new `rustls-0_23` crate feature. +- Add `TestServerConfig::disable_redirects()` method. +- Various types from `awc`, such as `ClientRequest` and `ClientResponse`, are now re-exported. +- Minimum supported Rust version (MSRV) is now 1.72. + +## 0.1.3 + +- Add `TestServerConfig::rustls_0_22()` method for Rustls v0.22 support behind new `rustls-0_22` crate feature. + ## 0.1.2 - Add `TestServerConfig::rustls_021()` method for Rustls v0.21 support behind new `rustls-0_21` crate feature. diff --git a/actix-test/Cargo.toml b/actix-test/Cargo.toml index 46d65abdc..34fdf2c82 100644 --- a/actix-test/Cargo.toml +++ b/actix-test/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-test" -version = "0.1.2" +version = "0.1.5" authors = [ "Nikolay Kim ", "Rob Ede ", @@ -18,6 +18,22 @@ categories = [ license = "MIT OR Apache-2.0" edition = "2021" +[package.metadata.cargo_check_external_types] +allowed_external_types = [ + "actix_codec::*", + "actix_http_test::*", + "actix_http::*", + "actix_service::*", + "actix_web::*", + "awc::*", + "bytes::*", + "futures_core::*", + "http::*", + "openssl::*", + "rustls::*", + "tokio::*", +] + [features] default = [] @@ -27,19 +43,23 @@ rustls = ["rustls-0_20"] rustls-0_20 = ["tls-rustls-0_20", "actix-http/rustls-0_20", "awc/rustls-0_20"] # TLS via Rustls v0.21 rustls-0_21 = ["tls-rustls-0_21", "actix-http/rustls-0_21", "awc/rustls-0_21"] +# TLS via Rustls v0.22 +rustls-0_22 = ["tls-rustls-0_22", "actix-http/rustls-0_22", "awc/rustls-0_22-webpki-roots"] +# TLS via Rustls v0.23 +rustls-0_23 = ["tls-rustls-0_23", "actix-http/rustls-0_23", "awc/rustls-0_23-webpki-roots"] # TLS via OpenSSL openssl = ["tls-openssl", "actix-http/openssl", "awc/openssl"] [dependencies] actix-codec = "0.5" -actix-http = "3" +actix-http = "3.7" actix-http-test = "3" actix-rt = "2.1" actix-service = "2" actix-utils = "3" -actix-web = { version = "4", default-features = false, features = ["cookies"] } -awc = { version = "3", default-features = false, features = ["cookies"] } +actix-web = { version = "4.6", default-features = false, features = ["cookies"] } +awc = { version = "3.5", default-features = false, features = ["cookies"] } futures-core = { version = "0.3.17", default-features = false, features = ["std"] } futures-util = { version = "0.3.17", default-features = false, features = [] } @@ -50,4 +70,9 @@ serde_urlencoded = "0.7" tls-openssl = { package = "openssl", version = "0.10.55", 
optional = true } tls-rustls-0_20 = { package = "rustls", version = "0.20", optional = true } tls-rustls-0_21 = { package = "rustls", version = "0.21", optional = true } +tls-rustls-0_22 = { package = "rustls", version = "0.22", optional = true } +tls-rustls-0_23 = { package = "rustls", version = "0.23", default-features = false, optional = true } tokio = { version = "1.24.2", features = ["sync"] } + +[lints] +workspace = true diff --git a/actix-test/README.md b/actix-test/README.md new file mode 100644 index 000000000..1a9b6f22a --- /dev/null +++ b/actix-test/README.md @@ -0,0 +1,45 @@ +# `actix-test` + + + +[![crates.io](https://img.shields.io/crates/v/actix-test?label=latest)](https://crates.io/crates/actix-test) +[![Documentation](https://docs.rs/actix-test/badge.svg?version=0.1.5)](https://docs.rs/actix-test/0.1.5) +![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) +![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-test.svg) +
+[![dependency status](https://deps.rs/crate/actix-test/0.1.5/status.svg)](https://deps.rs/crate/actix-test/0.1.5) +[![Download](https://img.shields.io/crates/d/actix-test.svg)](https://crates.io/crates/actix-test) +[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) + + + + + +Integration testing tools for Actix Web applications. + +The main integration testing tool is [`TestServer`]. It spawns a real HTTP server on an unused port and provides methods that use a real HTTP client. Therefore, it is much closer to real-world cases than using `init_service`, which skips HTTP encoding and decoding. + +## Examples + +```rust +use actix_web::{get, web, test, App, HttpResponse, Error, Responder}; + +#[get("/")] +async fn my_handler() -> Result { + Ok(HttpResponse::Ok()) +} + +#[actix_rt::test] +async fn test_example() { + let srv = actix_test::start(|| + App::new().service(my_handler) + ); + + let req = srv.get("/"); + let res = req.send().await.unwrap(); + + assert!(res.status().is_success()); +} +``` + + diff --git a/actix-test/src/lib.rs b/actix-test/src/lib.rs index e570bb266..f0da2c20d 100644 --- a/actix-test/src/lib.rs +++ b/actix-test/src/lib.rs @@ -5,6 +5,7 @@ //! real-world cases than using `init_service`, which skips HTTP encoding and decoding. //! //! # Examples +//! //! ``` //! use actix_web::{get, web, test, App, HttpResponse, Error, Responder}; //! @@ -26,8 +27,6 @@ //! } //! ``` -#![deny(rust_2018_idioms, nonstandard_style)] -#![warn(future_incompatible)] #![doc(html_logo_url = "https://actix.rs/img/logo.png")] #![doc(html_favicon_url = "https://actix.rs/favicon.ico")] #![cfg_attr(docsrs, feature(doc_auto_cfg))] @@ -52,7 +51,7 @@ use actix_web::{ rt::{self, System}, web, Error, }; -use awc::{error::PayloadError, Client, ClientRequest, ClientResponse, Connector}; +pub use awc::{error::PayloadError, Client, ClientRequest, ClientResponse, Connector}; use futures_core::Stream; use tokio::sync::mpsc; @@ -143,12 +142,18 @@ where StreamType::Rustls020(_) => true, #[cfg(feature = "rustls-0_21")] StreamType::Rustls021(_) => true, + #[cfg(feature = "rustls-0_22")] + StreamType::Rustls022(_) => true, + #[cfg(feature = "rustls-0_23")] + StreamType::Rustls023(_) => true, }; + let client_cfg = cfg.clone(); + // run server in separate orphaned thread thread::spawn(move || { rt::System::new().block_on(async move { - let tcp = net::TcpListener::bind(("127.0.0.1", cfg.port)).unwrap(); + let tcp = net::TcpListener::bind((cfg.listen_address.clone(), cfg.port)).unwrap(); let local_addr = tcp.local_addr().unwrap(); let factory = factory.clone(); let srv_cfg = cfg.clone(); @@ -327,6 +332,90 @@ where .rustls_021(config.clone()) }), }, + #[cfg(feature = "rustls-0_22")] + StreamType::Rustls022(config) => match cfg.tp { + HttpVer::Http1 => builder.listen("test", tcp, move || { + let app_cfg = + AppConfig::__priv_test_new(false, local_addr.to_string(), local_addr); + + let fac = factory() + .into_factory() + .map_err(|err| err.into().error_response()); + + HttpService::build() + .client_request_timeout(timeout) + .h1(map_config(fac, move |_| app_cfg.clone())) + .rustls_0_22(config.clone()) + }), + HttpVer::Http2 => builder.listen("test", tcp, move || { + let app_cfg = + AppConfig::__priv_test_new(false, local_addr.to_string(), local_addr); + + let fac = factory() + .into_factory() + .map_err(|err| err.into().error_response()); + + HttpService::build() + .client_request_timeout(timeout) + .h2(map_config(fac, move |_| app_cfg.clone())) + 
.rustls_0_22(config.clone()) + }), + HttpVer::Both => builder.listen("test", tcp, move || { + let app_cfg = + AppConfig::__priv_test_new(false, local_addr.to_string(), local_addr); + + let fac = factory() + .into_factory() + .map_err(|err| err.into().error_response()); + + HttpService::build() + .client_request_timeout(timeout) + .finish(map_config(fac, move |_| app_cfg.clone())) + .rustls_0_22(config.clone()) + }), + }, + #[cfg(feature = "rustls-0_23")] + StreamType::Rustls023(config) => match cfg.tp { + HttpVer::Http1 => builder.listen("test", tcp, move || { + let app_cfg = + AppConfig::__priv_test_new(false, local_addr.to_string(), local_addr); + + let fac = factory() + .into_factory() + .map_err(|err| err.into().error_response()); + + HttpService::build() + .client_request_timeout(timeout) + .h1(map_config(fac, move |_| app_cfg.clone())) + .rustls_0_23(config.clone()) + }), + HttpVer::Http2 => builder.listen("test", tcp, move || { + let app_cfg = + AppConfig::__priv_test_new(false, local_addr.to_string(), local_addr); + + let fac = factory() + .into_factory() + .map_err(|err| err.into().error_response()); + + HttpService::build() + .client_request_timeout(timeout) + .h2(map_config(fac, move |_| app_cfg.clone())) + .rustls_0_23(config.clone()) + }), + HttpVer::Both => builder.listen("test", tcp, move || { + let app_cfg = + AppConfig::__priv_test_new(false, local_addr.to_string(), local_addr); + + let fac = factory() + .into_factory() + .map_err(|err| err.into().error_response()); + + HttpService::build() + .client_request_timeout(timeout) + .finish(map_config(fac, move |_| app_cfg.clone())) + .rustls_0_23(config.clone()) + }), + }, } .expect("test server could not be created"); @@ -372,7 +461,13 @@ where } }; - Client::builder().connector(connector).finish() + let mut client_builder = Client::builder().connector(connector); + + if client_cfg.disable_redirects { + client_builder = client_builder.disable_redirects(); + } + + client_builder.finish() }; TestServer { @@ -392,6 +487,7 @@ enum HttpVer { Both, } +#[allow(clippy::large_enum_variant)] #[derive(Clone)] enum StreamType { Tcp, @@ -401,6 +497,10 @@ enum StreamType { Rustls020(tls_rustls_0_20::ServerConfig), #[cfg(feature = "rustls-0_21")] Rustls021(tls_rustls_0_21::ServerConfig), + #[cfg(feature = "rustls-0_22")] + Rustls022(tls_rustls_0_22::ServerConfig), + #[cfg(feature = "rustls-0_23")] + Rustls023(tls_rustls_0_23::ServerConfig), } /// Create default test server config. @@ -413,8 +513,10 @@ pub struct TestServerConfig { tp: HttpVer, stream: StreamType, client_request_timeout: Duration, + listen_address: String, port: u16, workers: usize, + disable_redirects: bool, } impl Default for TestServerConfig { @@ -424,56 +526,96 @@ impl Default for TestServerConfig { } impl TestServerConfig { - /// Create default server configuration + /// Constructs default server configuration. pub(crate) fn new() -> TestServerConfig { TestServerConfig { tp: HttpVer::Both, stream: StreamType::Tcp, client_request_timeout: Duration::from_secs(5), + listen_address: "127.0.0.1".to_string(), port: 0, workers: 1, + disable_redirects: false, } } - /// Accept HTTP/1.1 only. + /// Accepts HTTP/1.1 only. pub fn h1(mut self) -> Self { self.tp = HttpVer::Http1; self } - /// Accept HTTP/2 only. + /// Accepts HTTP/2 only. pub fn h2(mut self) -> Self { self.tp = HttpVer::Http2; self } - /// Accept secure connections via OpenSSL. + /// Accepts secure connections via OpenSSL. 
#[cfg(feature = "openssl")] pub fn openssl(mut self, acceptor: openssl::ssl::SslAcceptor) -> Self { self.stream = StreamType::Openssl(acceptor); self } - /// Accept secure connections via Rustls. + #[doc(hidden)] + #[deprecated(note = "Renamed to `rustls_0_20()`.")] #[cfg(feature = "rustls-0_20")] pub fn rustls(mut self, config: tls_rustls_0_20::ServerConfig) -> Self { self.stream = StreamType::Rustls020(config); self } - /// Accept secure connections via Rustls. + /// Accepts secure connections via Rustls v0.20. + #[cfg(feature = "rustls-0_20")] + pub fn rustls_0_20(mut self, config: tls_rustls_0_20::ServerConfig) -> Self { + self.stream = StreamType::Rustls020(config); + self + } + + #[doc(hidden)] + #[deprecated(note = "Renamed to `rustls_0_21()`.")] #[cfg(feature = "rustls-0_21")] pub fn rustls_021(mut self, config: tls_rustls_0_21::ServerConfig) -> Self { self.stream = StreamType::Rustls021(config); self } - /// Set client timeout for first request. + /// Accepts secure connections via Rustls v0.21. + #[cfg(feature = "rustls-0_21")] + pub fn rustls_0_21(mut self, config: tls_rustls_0_21::ServerConfig) -> Self { + self.stream = StreamType::Rustls021(config); + self + } + + /// Accepts secure connections via Rustls v0.22. + #[cfg(feature = "rustls-0_22")] + pub fn rustls_0_22(mut self, config: tls_rustls_0_22::ServerConfig) -> Self { + self.stream = StreamType::Rustls022(config); + self + } + + /// Accepts secure connections via Rustls v0.23. + #[cfg(feature = "rustls-0_23")] + pub fn rustls_0_23(mut self, config: tls_rustls_0_23::ServerConfig) -> Self { + self.stream = StreamType::Rustls023(config); + self + } + + /// Sets client timeout for first request. pub fn client_request_timeout(mut self, dur: Duration) -> Self { self.client_request_timeout = dur; self } + /// Sets the address the server will listen on. + /// + /// By default, only listens on `127.0.0.1`. + pub fn listen_address(mut self, addr: impl Into) -> Self { + self.listen_address = addr.into(); + self + } + /// Sets test server port. /// /// By default, a random free port is determined by the OS. @@ -489,6 +631,15 @@ impl TestServerConfig { self.workers = workers; self } + + /// Instruct the client to not follow redirects. + /// + /// By default, the client will follow up to 10 consecutive redirects + /// before giving up. + pub fn disable_redirects(mut self) -> Self { + self.disable_redirects = true; + self + } } /// A basic HTTP server controller that simplifies the process of writing integration tests for @@ -515,9 +666,9 @@ impl TestServer { let scheme = if self.tls { "https" } else { "http" }; if uri.starts_with('/') { - format!("{}://localhost:{}{}", scheme, self.addr.port(), uri) + format!("{}://{}{}", scheme, self.addr, uri) } else { - format!("{}://localhost:{}/{}", scheme, self.addr.port(), uri) + format!("{}://{}/{}", scheme, self.addr, uri) } } diff --git a/actix-web-actors/CHANGES.md b/actix-web-actors/CHANGES.md index 5c516db56..3f214274d 100644 --- a/actix-web-actors/CHANGES.md +++ b/actix-web-actors/CHANGES.md @@ -2,6 +2,14 @@ ## Unreleased +## 4.3.1 + +- Reduce memory usage by `take`-ing (rather than `split`-ing) the encoded buffer when yielding bytes in the response stream. +- Mark crate as deprecated. +- Minimum supported Rust version (MSRV) is now 1.72. + +## 4.3.0 + - Minimum supported Rust version (MSRV) is now 1.68 due to transitive `time` dependency. 
## 4.2.0 diff --git a/actix-web-actors/Cargo.toml b/actix-web-actors/Cargo.toml index c6f14554a..e7034ab84 100644 --- a/actix-web-actors/Cargo.toml +++ b/actix-web-actors/Cargo.toml @@ -1,17 +1,24 @@ [package] name = "actix-web-actors" -version = "4.2.0" +version = "4.3.1+deprecated" authors = ["Nikolay Kim "] description = "Actix actors support for Actix Web" keywords = ["actix", "http", "web", "framework", "async"] -homepage = "https://actix.rs" -repository = "https://github.com/actix/actix-web" -license = "MIT OR Apache-2.0" -edition = "2021" +homepage.workspace = true +repository.workspace = true +license.workspace = true +edition.workspace = true +rust-version.workspace = true -[lib] -name = "actix_web_actors" -path = "src/lib.rs" +[package.metadata.cargo_check_external_types] +allowed_external_types = [ + "actix::*", + "actix_http::*", + "actix_web::*", + "bytes::*", + "bytestring::*", + "futures_core::*", +] [dependencies] actix = { version = ">=0.12, <0.14", default-features = false } @@ -32,6 +39,9 @@ actix-test = "0.1" awc = { version = "3", default-features = false } actix-web = { version = "4", features = ["macros"] } -env_logger = "0.10" +env_logger = "0.11" futures-util = { version = "0.3.17", default-features = false, features = ["std"] } mime = "0.3" + +[lints] +workspace = true diff --git a/actix-web-actors/README.md b/actix-web-actors/README.md index b2c30b954..0ec91a224 100644 --- a/actix-web-actors/README.md +++ b/actix-web-actors/README.md @@ -1,17 +1,18 @@ -# actix-web-actors +# `actix-web-actors` > Actix actors support for Actix Web. +> +> This crate is deprecated. Migrate to [`actix-ws`](https://crates.io/crates/actix-ws). + + [![crates.io](https://img.shields.io/crates/v/actix-web-actors?label=latest)](https://crates.io/crates/actix-web-actors) -[![Documentation](https://docs.rs/actix-web-actors/badge.svg?version=4.2.0)](https://docs.rs/actix-web-actors/4.2.0) -![Version](https://img.shields.io/badge/rustc-1.68+-ab6000.svg) +[![Documentation](https://docs.rs/actix-web-actors/badge.svg?version=4.3.1)](https://docs.rs/actix-web-actors/4.3.1) +![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) ![License](https://img.shields.io/crates/l/actix-web-actors.svg)
-[![dependency status](https://deps.rs/crate/actix-web-actors/4.2.0/status.svg)](https://deps.rs/crate/actix-web-actors/4.2.0) +![maintenance-status](https://img.shields.io/badge/maintenance-deprecated-red.svg) [![Download](https://img.shields.io/crates/d/actix-web-actors.svg)](https://crates.io/crates/actix-web-actors) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) -## Documentation & Resources - -- [API Documentation](https://docs.rs/actix-web-actors) -- Minimum Supported Rust Version (MSRV): 1.68 + diff --git a/actix-web-actors/src/context.rs b/actix-web-actors/src/context.rs index be8fd387c..23e336459 100644 --- a/actix-web-actors/src/context.rs +++ b/actix-web-actors/src/context.rs @@ -248,13 +248,11 @@ where mod tests { use std::time::Duration; - use actix::Actor; use actix_web::{ http::StatusCode, test::{call_service, init_service, read_body, TestRequest}, web, App, HttpResponse, }; - use bytes::Bytes; use super::*; diff --git a/actix-web-actors/src/lib.rs b/actix-web-actors/src/lib.rs index d89b0ee35..4831d2637 100644 --- a/actix-web-actors/src/lib.rs +++ b/actix-web-actors/src/lib.rs @@ -1,5 +1,7 @@ //! Actix actors support for Actix Web. //! +//! This crate is deprecated. Migrate to [`actix-ws`](https://crates.io/crates/actix-ws). +//! //! # Examples //! //! ```no_run @@ -55,8 +57,6 @@ //! * [`HttpContext`]: This struct provides actor support for streaming HTTP responses. //! -#![deny(rust_2018_idioms, nonstandard_style)] -#![warn(future_incompatible)] #![doc(html_logo_url = "https://actix.rs/img/logo.png")] #![doc(html_favicon_url = "https://actix.rs/favicon.ico")] #![cfg_attr(docsrs, feature(doc_auto_cfg))] diff --git a/actix-web-actors/src/ws.rs b/actix-web-actors/src/ws.rs index 04dbf5e17..7f7607fa9 100644 --- a/actix-web-actors/src/ws.rs +++ b/actix-web-actors/src/ws.rs @@ -710,7 +710,7 @@ where } if !this.buf.is_empty() { - Poll::Ready(Some(Ok(this.buf.split().freeze()))) + Poll::Ready(Some(Ok(std::mem::take(&mut this.buf).freeze()))) } else if this.fut.alive() && !this.closed { Poll::Pending } else { @@ -817,10 +817,7 @@ where #[cfg(test)] mod tests { - use actix_web::{ - http::{header, Method}, - test::TestRequest, - }; + use actix_web::test::TestRequest; use super::*; diff --git a/actix-web-codegen/CHANGES.md b/actix-web-codegen/CHANGES.md index 00e36b037..d143723f4 100644 --- a/actix-web-codegen/CHANGES.md +++ b/actix-web-codegen/CHANGES.md @@ -2,6 +2,13 @@ ## Unreleased +## 4.3.0 + +- Add `#[scope]` macro. +- Add `compat-routing-macros-force-pub` crate feature which, on-by-default, which when disabled causes handlers to inherit their attached function's visibility. +- Prevent inclusion of default `actix-router` features. +- Minimum supported Rust version (MSRV) is now 1.72. + ## 4.2.2 - Fix regression when declaring `wrap` attribute using an expression. 
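As context for the `#[scope]` macro introduced in the 4.3.0 changelog above: it prepends its path argument to every routing macro inside the annotated module, and the on-by-default `compat-routing-macros-force-pub` feature keeps the generated handler types `pub` (disabling it makes them inherit the attached function's visibility). Below is a minimal usage sketch modeled on the doc examples added later in this patch; the `/api` prefix, handler name, and `HttpServer` wiring are illustrative only, not part of the patch.

```rust
use actix_web::{App, HttpServer, Responder};
use actix_web_codegen::{get, scope};

// Every routing macro inside this module gets "/api" prepended to its path,
// so `hello` is served at `/api/hello`.
#[scope("/api")]
mod api {
    use super::*;

    #[get("/hello")]
    pub async fn hello() -> impl Responder {
        "Hello, world!"
    }
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    // With the default `compat-routing-macros-force-pub` feature enabled, the
    // generated `api::hello` service type is public and registers as usual.
    HttpServer::new(|| App::new().service(api::hello))
        .bind(("127.0.0.1", 8080))?
        .run()
        .await
}
```

Registering `api::hello` with `App::service` works the same as for a top-level handler; the only difference is the `/api` prefix baked into the generated route path.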
diff --git a/actix-web-codegen/Cargo.toml b/actix-web-codegen/Cargo.toml index 7039ea7df..d3938da8e 100644 --- a/actix-web-codegen/Cargo.toml +++ b/actix-web-codegen/Cargo.toml @@ -1,21 +1,26 @@ [package] name = "actix-web-codegen" -version = "4.2.2" +version = "4.3.0" description = "Routing and runtime macros for Actix Web" -homepage = "https://actix.rs" -repository = "https://github.com/actix/actix-web" authors = [ "Nikolay Kim ", "Rob Ede ", ] -license = "MIT OR Apache-2.0" -edition = "2021" +homepage.workspace = true +repository.workspace = true +license.workspace = true +edition.workspace = true +rust-version.workspace = true [lib] proc-macro = true +[features] +default = ["compat-routing-macros-force-pub"] +compat-routing-macros-force-pub = [] + [dependencies] -actix-router = "0.5" +actix-router = { version = "0.5", default-features = false } proc-macro2 = "1" quote = "1" syn = { version = "2", features = ["full", "extra-traits"] } @@ -30,3 +35,6 @@ actix-web = "4" futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] } trybuild = "1" rustversion = "1" + +[lints] +workspace = true diff --git a/actix-web-codegen/README.md b/actix-web-codegen/README.md index e9a1f9c7e..e61bf5c74 100644 --- a/actix-web-codegen/README.md +++ b/actix-web-codegen/README.md @@ -1,20 +1,19 @@ -# actix-web-codegen +# `actix-web-codegen` > Routing and runtime macros for Actix Web. + + [![crates.io](https://img.shields.io/crates/v/actix-web-codegen?label=latest)](https://crates.io/crates/actix-web-codegen) -[![Documentation](https://docs.rs/actix-web-codegen/badge.svg?version=4.2.2)](https://docs.rs/actix-web-codegen/4.2.2) -![Version](https://img.shields.io/badge/rustc-1.68+-ab6000.svg) +[![Documentation](https://docs.rs/actix-web-codegen/badge.svg?version=4.3.0)](https://docs.rs/actix-web-codegen/4.3.0) +![Version](https://img.shields.io/badge/rustc-1.72+-ab6000.svg) ![License](https://img.shields.io/crates/l/actix-web-codegen.svg)
-[![dependency status](https://deps.rs/crate/actix-web-codegen/4.2.2/status.svg)](https://deps.rs/crate/actix-web-codegen/4.2.2) +[![dependency status](https://deps.rs/crate/actix-web-codegen/4.3.0/status.svg)](https://deps.rs/crate/actix-web-codegen/4.3.0) [![Download](https://img.shields.io/crates/d/actix-web-codegen.svg)](https://crates.io/crates/actix-web-codegen) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) -## Documentation & Resources - -- [API Documentation](https://docs.rs/actix-web-codegen) -- Minimum Supported Rust Version (MSRV): 1.68 + ## Compile Testing diff --git a/actix-web-codegen/src/lib.rs b/actix-web-codegen/src/lib.rs index 6d6c9ab5c..e22bff8cd 100644 --- a/actix-web-codegen/src/lib.rs +++ b/actix-web-codegen/src/lib.rs @@ -73,8 +73,6 @@ //! [DELETE]: macro@delete #![recursion_limit = "512"] -#![deny(rust_2018_idioms, nonstandard_style)] -#![warn(future_incompatible)] #![doc(html_logo_url = "https://actix.rs/img/logo.png")] #![doc(html_favicon_url = "https://actix.rs/favicon.ico")] #![cfg_attr(docsrs, feature(doc_auto_cfg))] @@ -83,6 +81,7 @@ use proc_macro::TokenStream; use quote::quote; mod route; +mod scope; /// Creates resource handler, allowing multiple HTTP method guards. /// @@ -197,6 +196,43 @@ method_macro!(Options, options); method_macro!(Trace, trace); method_macro!(Patch, patch); +/// Prepends a path prefix to all handlers using routing macros inside the attached module. +/// +/// # Syntax +/// +/// ``` +/// # use actix_web_codegen::scope; +/// #[scope("/prefix")] +/// mod api { +/// // ... +/// } +/// ``` +/// +/// # Arguments +/// +/// - `"/prefix"` - Raw literal string to be prefixed onto contained handlers' paths. +/// +/// # Example +/// +/// ``` +/// # use actix_web_codegen::{scope, get}; +/// # use actix_web::Responder; +/// #[scope("/api")] +/// mod api { +/// # use super::*; +/// #[get("/hello")] +/// pub async fn hello() -> impl Responder { +/// // this has path /api/hello +/// "Hello, world!" +/// } +/// } +/// # fn main() {} +/// ``` +#[proc_macro_attribute] +pub fn scope(args: TokenStream, input: TokenStream) -> TokenStream { + scope::with_scope(args, input) +} + /// Marks async main function as the Actix Web system entry-point. /// /// Note that Actix Web also works under `#[tokio::main]` since version 4.0. However, this macro is @@ -240,3 +276,15 @@ pub fn test(_: TokenStream, item: TokenStream) -> TokenStream { output.extend(item); output } + +/// Converts the error to a token stream and appends it to the original input. +/// +/// Returning the original input in addition to the error is good for IDEs which can gracefully +/// recover and show more precise errors within the macro body. +/// +/// See for more info. 
+fn input_and_compile_error(mut item: TokenStream, err: syn::Error) -> TokenStream { + let compile_err = TokenStream::from(err.to_compile_error()); + item.extend(compile_err); + item +} diff --git a/actix-web-codegen/src/route.rs b/actix-web-codegen/src/route.rs index 7a2dfc051..e24903e3a 100644 --- a/actix-web-codegen/src/route.rs +++ b/actix-web-codegen/src/route.rs @@ -6,10 +6,12 @@ use proc_macro2::{Span, TokenStream as TokenStream2}; use quote::{quote, ToTokens, TokenStreamExt}; use syn::{punctuated::Punctuated, Ident, LitStr, Path, Token}; +use crate::input_and_compile_error; + #[derive(Debug)] pub struct RouteArgs { - path: syn::LitStr, - options: Punctuated, + pub(crate) path: syn::LitStr, + pub(crate) options: Punctuated, } impl syn::parse::Parse for RouteArgs { @@ -78,7 +80,7 @@ macro_rules! standard_method_type { } } - fn from_path(method: &Path) -> Result { + pub(crate) fn from_path(method: &Path) -> Result { match () { $(_ if method.is_ident(stringify!($lower)) => Ok(Self::$variant),)+ _ => Err(()), @@ -411,6 +413,13 @@ impl ToTokens for Route { doc_attributes, } = self; + #[allow(unused_variables)] // used when force-pub feature is disabled + let vis = &ast.vis; + + // TODO(breaking): remove this force-pub forwards-compatibility feature + #[cfg(feature = "compat-routing-macros-force-pub")] + let vis = syn::Visibility::Public(::default()); + let registrations: TokenStream2 = args .iter() .map(|args| { @@ -458,7 +467,7 @@ impl ToTokens for Route { let stream = quote! { #(#doc_attributes)* #[allow(non_camel_case_types, missing_docs)] - pub struct #name; + #vis struct #name; impl ::actix_web::dev::HttpServiceFactory for #name { fn register(self, __config: &mut actix_web::dev::AppService) { @@ -542,15 +551,3 @@ pub(crate) fn with_methods(input: TokenStream) -> TokenStream { Err(err) => input_and_compile_error(input, err), } } - -/// Converts the error to a token stream and appends it to the original input. -/// -/// Returning the original input in addition to the error is good for IDEs which can gracefully -/// recover and show more precise errors within the macro body. -/// -/// See for more info. 
-fn input_and_compile_error(mut item: TokenStream, err: syn::Error) -> TokenStream { - let compile_err = TokenStream::from(err.to_compile_error()); - item.extend(compile_err); - item -} diff --git a/actix-web-codegen/src/scope.rs b/actix-web-codegen/src/scope.rs new file mode 100644 index 000000000..067d95a60 --- /dev/null +++ b/actix-web-codegen/src/scope.rs @@ -0,0 +1,103 @@ +use proc_macro::TokenStream; +use proc_macro2::{Span, TokenStream as TokenStream2}; +use quote::{quote, ToTokens as _}; + +use crate::{ + input_and_compile_error, + route::{MethodType, RouteArgs}, +}; + +pub fn with_scope(args: TokenStream, input: TokenStream) -> TokenStream { + match with_scope_inner(args, input.clone()) { + Ok(stream) => stream, + Err(err) => input_and_compile_error(input, err), + } +} + +fn with_scope_inner(args: TokenStream, input: TokenStream) -> syn::Result { + if args.is_empty() { + return Err(syn::Error::new( + Span::call_site(), + "missing arguments for scope macro, expected: #[scope(\"/prefix\")]", + )); + } + + let scope_prefix = syn::parse::(args.clone()).map_err(|err| { + syn::Error::new( + err.span(), + "argument to scope macro is not a string literal, expected: #[scope(\"/prefix\")]", + ) + })?; + + let scope_prefix_value = scope_prefix.value(); + + if scope_prefix_value.ends_with('/') { + // trailing slashes cause non-obvious problems + // it's better to point them out to developers rather than + + return Err(syn::Error::new( + scope_prefix.span(), + "scopes should not have trailing slashes; see https://docs.rs/actix-web/4/actix_web/struct.Scope.html#avoid-trailing-slashes", + )); + } + + let mut module = syn::parse::(input).map_err(|err| { + syn::Error::new(err.span(), "#[scope] macro must be attached to a module") + })?; + + // modify any routing macros (method or route[s]) attached to + // functions by prefixing them with this scope macro's argument + if let Some((_, items)) = &mut module.content { + for item in items { + if let syn::Item::Fn(fun) = item { + fun.attrs = fun + .attrs + .iter() + .map(|attr| modify_attribute_with_scope(attr, &scope_prefix_value)) + .collect(); + } + } + } + + Ok(module.to_token_stream().into()) +} + +/// Checks if the attribute is a method type and has a route path, then modifies it. +fn modify_attribute_with_scope(attr: &syn::Attribute, scope_path: &str) -> syn::Attribute { + match (attr.parse_args::(), attr.clone().meta) { + (Ok(route_args), syn::Meta::List(meta_list)) if has_allowed_methods_in_scope(attr) => { + let modified_path = format!("{}{}", scope_path, route_args.path.value()); + + let options_tokens: Vec = route_args + .options + .iter() + .map(|option| { + quote! { ,#option } + }) + .collect(); + + let combined_options_tokens: TokenStream2 = + options_tokens + .into_iter() + .fold(TokenStream2::new(), |mut acc, ts| { + acc.extend(std::iter::once(ts)); + acc + }); + + syn::Attribute { + meta: syn::Meta::List(syn::MetaList { + tokens: quote! 
{ #modified_path #combined_options_tokens }, + ..meta_list.clone() + }), + ..attr.clone() + } + } + _ => attr.clone(), + } +} + +fn has_allowed_methods_in_scope(attr: &syn::Attribute) -> bool { + MethodType::from_path(attr.path()).is_ok() + || attr.path().is_ident("route") + || attr.path().is_ident("ROUTE") +} diff --git a/actix-web-codegen/tests/test_macro.rs b/actix-web-codegen/tests/routes.rs similarity index 99% rename from actix-web-codegen/tests/test_macro.rs rename to actix-web-codegen/tests/routes.rs index fb50d4ae0..a6e606871 100644 --- a/actix-web-codegen/tests/test_macro.rs +++ b/actix-web-codegen/tests/routes.rs @@ -145,7 +145,7 @@ async fn custom_resource_name_test<'a>(req: HttpRequest) -> impl Responder { mod guard_module { use actix_web::{guard::GuardContext, http::header}; - pub fn guard(ctx: &GuardContext) -> bool { + pub fn guard(ctx: &GuardContext<'_>) -> bool { ctx.header::() .map(|h| h.preference() == "image/*") .unwrap_or(false) diff --git a/actix-web-codegen/tests/scopes.rs b/actix-web-codegen/tests/scopes.rs new file mode 100644 index 000000000..b8c832682 --- /dev/null +++ b/actix-web-codegen/tests/scopes.rs @@ -0,0 +1,200 @@ +use actix_web::{guard::GuardContext, http, http::header, web, App, HttpResponse, Responder}; +use actix_web_codegen::{delete, get, post, route, routes, scope}; + +pub fn image_guard(ctx: &GuardContext<'_>) -> bool { + ctx.header::() + .map(|h| h.preference() == "image/*") + .unwrap_or(false) +} + +#[scope("/test")] +mod scope_module { + // ensure that imports can be brought into the scope + use super::*; + + #[get("/test/guard", guard = "image_guard")] + pub async fn guard() -> impl Responder { + HttpResponse::Ok() + } + + #[get("/test")] + pub async fn test() -> impl Responder { + HttpResponse::Ok().finish() + } + + #[get("/twice-test/{value}")] + pub async fn twice(value: web::Path) -> impl actix_web::Responder { + let int_value: i32 = value.parse().unwrap_or(0); + let doubled = int_value * 2; + HttpResponse::Ok().body(format!("Twice value: {}", doubled)) + } + + #[post("/test")] + pub async fn post() -> impl Responder { + HttpResponse::Ok().body("post works") + } + + #[delete("/test")] + pub async fn delete() -> impl Responder { + "delete works" + } + + #[route("/test", method = "PUT", method = "PATCH", method = "CUSTOM")] + pub async fn multiple_shared_path() -> impl Responder { + HttpResponse::Ok().finish() + } + + #[routes] + #[head("/test1")] + #[connect("/test2")] + #[options("/test3")] + #[trace("/test4")] + pub async fn multiple_separate_paths() -> impl Responder { + HttpResponse::Ok().finish() + } + + // test calling this from other mod scope with scope attribute... + pub fn mod_common(message: String) -> impl actix_web::Responder { + HttpResponse::Ok().body(message) + } +} + +/// Scope doc string to check in cargo expand. +#[scope("/v1")] +mod mod_scope_v1 { + use super::*; + + /// Route doc string to check in cargo expand. + #[get("/test")] + pub async fn test() -> impl Responder { + scope_module::mod_common("version1 works".to_string()) + } +} + +#[scope("/v2")] +mod mod_scope_v2 { + use super::*; + + // check to make sure non-function tokens in the scope block are preserved... + enum TestEnum { + Works, + } + + #[get("/test")] + pub async fn test() -> impl Responder { + // make sure this type still exists... 
+ let test_enum = TestEnum::Works; + + match test_enum { + TestEnum::Works => scope_module::mod_common("version2 works".to_string()), + } + } +} + +#[actix_rt::test] +async fn scope_get_async() { + let srv = actix_test::start(|| App::new().service(scope_module::test)); + + let request = srv.request(http::Method::GET, srv.url("/test/test")); + let response = request.send().await.unwrap(); + assert!(response.status().is_success()); +} + +#[actix_rt::test] +async fn scope_get_param_async() { + let srv = actix_test::start(|| App::new().service(scope_module::twice)); + + let request = srv.request(http::Method::GET, srv.url("/test/twice-test/4")); + let mut response = request.send().await.unwrap(); + let body = response.body().await.unwrap(); + let body_str = String::from_utf8(body.to_vec()).unwrap(); + assert_eq!(body_str, "Twice value: 8"); +} + +#[actix_rt::test] +async fn scope_post_async() { + let srv = actix_test::start(|| App::new().service(scope_module::post)); + + let request = srv.request(http::Method::POST, srv.url("/test/test")); + let mut response = request.send().await.unwrap(); + let body = response.body().await.unwrap(); + let body_str = String::from_utf8(body.to_vec()).unwrap(); + assert_eq!(body_str, "post works"); +} + +#[actix_rt::test] +async fn multiple_shared_path_async() { + let srv = actix_test::start(|| App::new().service(scope_module::multiple_shared_path)); + + let request = srv.request(http::Method::PUT, srv.url("/test/test")); + let response = request.send().await.unwrap(); + assert!(response.status().is_success()); + + let request = srv.request(http::Method::PATCH, srv.url("/test/test")); + let response = request.send().await.unwrap(); + assert!(response.status().is_success()); +} + +#[actix_rt::test] +async fn multiple_multi_path_async() { + let srv = actix_test::start(|| App::new().service(scope_module::multiple_separate_paths)); + + let request = srv.request(http::Method::HEAD, srv.url("/test/test1")); + let response = request.send().await.unwrap(); + assert!(response.status().is_success()); + + let request = srv.request(http::Method::CONNECT, srv.url("/test/test2")); + let response = request.send().await.unwrap(); + assert!(response.status().is_success()); + + let request = srv.request(http::Method::OPTIONS, srv.url("/test/test3")); + let response = request.send().await.unwrap(); + assert!(response.status().is_success()); + + let request = srv.request(http::Method::TRACE, srv.url("/test/test4")); + let response = request.send().await.unwrap(); + assert!(response.status().is_success()); +} + +#[actix_rt::test] +async fn scope_delete_async() { + let srv = actix_test::start(|| App::new().service(scope_module::delete)); + + let request = srv.request(http::Method::DELETE, srv.url("/test/test")); + let mut response = request.send().await.unwrap(); + let body = response.body().await.unwrap(); + let body_str = String::from_utf8(body.to_vec()).unwrap(); + assert_eq!(body_str, "delete works"); +} + +#[actix_rt::test] +async fn scope_get_with_guard_async() { + let srv = actix_test::start(|| App::new().service(scope_module::guard)); + + let request = srv + .request(http::Method::GET, srv.url("/test/test/guard")) + .insert_header(("Accept", "image/*")); + let response = request.send().await.unwrap(); + assert!(response.status().is_success()); +} + +#[actix_rt::test] +async fn scope_v1_v2_async() { + let srv = actix_test::start(|| { + App::new() + .service(mod_scope_v1::test) + .service(mod_scope_v2::test) + }); + + let request = srv.request(http::Method::GET, 
srv.url("/v1/test")); + let mut response = request.send().await.unwrap(); + let body = response.body().await.unwrap(); + let body_str = String::from_utf8(body.to_vec()).unwrap(); + assert_eq!(body_str, "version1 works"); + + let request = srv.request(http::Method::GET, srv.url("/v2/test")); + let mut response = request.send().await.unwrap(); + let body = response.body().await.unwrap(); + let body_str = String::from_utf8(body.to_vec()).unwrap(); + assert_eq!(body_str, "version2 works"); +} diff --git a/actix-web-codegen/tests/trybuild.rs b/actix-web-codegen/tests/trybuild.rs index 8e1f58a4c..91073cf3b 100644 --- a/actix-web-codegen/tests/trybuild.rs +++ b/actix-web-codegen/tests/trybuild.rs @@ -1,4 +1,4 @@ -#[rustversion::stable(1.68)] // MSRV +#[rustversion::stable(1.72)] // MSRV #[test] fn compile_macros() { let t = trybuild::TestCases::new(); @@ -18,6 +18,11 @@ fn compile_macros() { t.compile_fail("tests/trybuild/routes-missing-method-fail.rs"); t.compile_fail("tests/trybuild/routes-missing-args-fail.rs"); + t.compile_fail("tests/trybuild/scope-on-handler.rs"); + t.compile_fail("tests/trybuild/scope-missing-args.rs"); + t.compile_fail("tests/trybuild/scope-invalid-args.rs"); + t.compile_fail("tests/trybuild/scope-trailing-slash.rs"); + t.pass("tests/trybuild/docstring-ok.rs"); t.pass("tests/trybuild/test-runtime.rs"); diff --git a/actix-web-codegen/tests/trybuild/route-custom-lowercase.stderr b/actix-web-codegen/tests/trybuild/route-custom-lowercase.stderr index 88198a55d..c2a51d005 100644 --- a/actix-web-codegen/tests/trybuild/route-custom-lowercase.stderr +++ b/actix-web-codegen/tests/trybuild/route-custom-lowercase.stderr @@ -13,17 +13,20 @@ error[E0277]: the trait bound `fn() -> impl std::future::Future | required by a bound introduced by this call | = help: the following other types implement trait `HttpServiceFactory`: + Resource + actix_web::Scope + Vec + Redirect + (A,) (A, B) (A, B, C) (A, B, C, D) - (A, B, C, D, E) - (A, B, C, D, E, F) - (A, B, C, D, E, F, G) - (A, B, C, D, E, F, G, H) - (A, B, C, D, E, F, G, H, I) and $N others note: required by a bound in `App::::service` --> $WORKSPACE/actix-web/src/app.rs | + | pub fn service(mut self, factory: F) -> Self + | ------- required by a bound in this associated function + | where | F: HttpServiceFactory + 'static, | ^^^^^^^^^^^^^^^^^^ required by this bound in `App::::service` diff --git a/actix-web-codegen/tests/trybuild/route-duplicate-method-fail.stderr b/actix-web-codegen/tests/trybuild/route-duplicate-method-fail.stderr index bda736348..ae18f347f 100644 --- a/actix-web-codegen/tests/trybuild/route-duplicate-method-fail.stderr +++ b/actix-web-codegen/tests/trybuild/route-duplicate-method-fail.stderr @@ -13,17 +13,20 @@ error[E0277]: the trait bound `fn() -> impl std::future::Future | required by a bound introduced by this call | = help: the following other types implement trait `HttpServiceFactory`: + Resource + actix_web::Scope + Vec + Redirect + (A,) (A, B) (A, B, C) (A, B, C, D) - (A, B, C, D, E) - (A, B, C, D, E, F) - (A, B, C, D, E, F, G) - (A, B, C, D, E, F, G, H) - (A, B, C, D, E, F, G, H, I) and $N others note: required by a bound in `App::::service` --> $WORKSPACE/actix-web/src/app.rs | + | pub fn service(mut self, factory: F) -> Self + | ------- required by a bound in this associated function + | where | F: HttpServiceFactory + 'static, | ^^^^^^^^^^^^^^^^^^ required by this bound in `App::::service` diff --git a/actix-web-codegen/tests/trybuild/route-malformed-path-fail.stderr 
b/actix-web-codegen/tests/trybuild/route-malformed-path-fail.stderr index 93c510109..c1100c784 100644 --- a/actix-web-codegen/tests/trybuild/route-malformed-path-fail.stderr +++ b/actix-web-codegen/tests/trybuild/route-malformed-path-fail.stderr @@ -20,10 +20,7 @@ error: custom attribute panicked 13 | #[get("/{}")] | ^^^^^^^^^^^^^ | - = help: message: Wrong path pattern: "/{}" regex parse error: - ((?s-m)^/(?P<>[^/]+))$ - ^ - error: empty capture group name + = help: message: Wrong path pattern: "/{}" empty capture group names are not allowed error: custom attribute panicked --> $DIR/route-malformed-path-fail.rs:23:1 diff --git a/actix-web-codegen/tests/trybuild/route-missing-method-fail.stderr b/actix-web-codegen/tests/trybuild/route-missing-method-fail.stderr index 9f2f788fb..37d8354c9 100644 --- a/actix-web-codegen/tests/trybuild/route-missing-method-fail.stderr +++ b/actix-web-codegen/tests/trybuild/route-missing-method-fail.stderr @@ -15,17 +15,20 @@ error[E0277]: the trait bound `fn() -> impl std::future::Future | required by a bound introduced by this call | = help: the following other types implement trait `HttpServiceFactory`: + Resource + actix_web::Scope + Vec + Redirect + (A,) (A, B) (A, B, C) (A, B, C, D) - (A, B, C, D, E) - (A, B, C, D, E, F) - (A, B, C, D, E, F, G) - (A, B, C, D, E, F, G, H) - (A, B, C, D, E, F, G, H, I) and $N others note: required by a bound in `App::::service` --> $WORKSPACE/actix-web/src/app.rs | + | pub fn service(mut self, factory: F) -> Self + | ------- required by a bound in this associated function + | where | F: HttpServiceFactory + 'static, | ^^^^^^^^^^^^^^^^^^ required by this bound in `App::::service` diff --git a/actix-web-codegen/tests/trybuild/routes-missing-args-fail.stderr b/actix-web-codegen/tests/trybuild/routes-missing-args-fail.stderr index 2e84c296a..40b19fc77 100644 --- a/actix-web-codegen/tests/trybuild/routes-missing-args-fail.stderr +++ b/actix-web-codegen/tests/trybuild/routes-missing-args-fail.stderr @@ -29,17 +29,20 @@ error[E0277]: the trait bound `fn() -> impl std::future::Future | required by a bound introduced by this call | = help: the following other types implement trait `HttpServiceFactory`: + Resource + actix_web::Scope + Vec + Redirect + (A,) (A, B) (A, B, C) (A, B, C, D) - (A, B, C, D, E) - (A, B, C, D, E, F) - (A, B, C, D, E, F, G) - (A, B, C, D, E, F, G, H) - (A, B, C, D, E, F, G, H, I) and $N others note: required by a bound in `App::::service` --> $WORKSPACE/actix-web/src/app.rs | + | pub fn service(mut self, factory: F) -> Self + | ------- required by a bound in this associated function + | where | F: HttpServiceFactory + 'static, | ^^^^^^^^^^^^^^^^^^ required by this bound in `App::::service` diff --git a/actix-web-codegen/tests/trybuild/routes-missing-method-fail.stderr b/actix-web-codegen/tests/trybuild/routes-missing-method-fail.stderr index 228dced9c..ff7f00b3b 100644 --- a/actix-web-codegen/tests/trybuild/routes-missing-method-fail.stderr +++ b/actix-web-codegen/tests/trybuild/routes-missing-method-fail.stderr @@ -15,17 +15,20 @@ error[E0277]: the trait bound `fn() -> impl std::future::Future | required by a bound introduced by this call | = help: the following other types implement trait `HttpServiceFactory`: + Resource + actix_web::Scope + Vec + Redirect + (A,) (A, B) (A, B, C) (A, B, C, D) - (A, B, C, D, E) - (A, B, C, D, E, F) - (A, B, C, D, E, F, G) - (A, B, C, D, E, F, G, H) - (A, B, C, D, E, F, G, H, I) and $N others note: required by a bound in `App::::service` --> $WORKSPACE/actix-web/src/app.rs | 
+ | pub fn service(mut self, factory: F) -> Self + | ------- required by a bound in this associated function + | where | F: HttpServiceFactory + 'static, | ^^^^^^^^^^^^^^^^^^ required by this bound in `App::::service` diff --git a/actix-web-codegen/tests/trybuild/scope-invalid-args.rs b/actix-web-codegen/tests/trybuild/scope-invalid-args.rs new file mode 100644 index 000000000..ec021d5eb --- /dev/null +++ b/actix-web-codegen/tests/trybuild/scope-invalid-args.rs @@ -0,0 +1,14 @@ +use actix_web_codegen::scope; + +const PATH: &str = "/api"; + +#[scope(PATH)] +mod api_const {} + +#[scope(true)] +mod api_bool {} + +#[scope(123)] +mod api_num {} + +fn main() {} diff --git a/actix-web-codegen/tests/trybuild/scope-invalid-args.stderr b/actix-web-codegen/tests/trybuild/scope-invalid-args.stderr new file mode 100644 index 000000000..0ab335966 --- /dev/null +++ b/actix-web-codegen/tests/trybuild/scope-invalid-args.stderr @@ -0,0 +1,17 @@ +error: argument to scope macro is not a string literal, expected: #[scope("/prefix")] + --> tests/trybuild/scope-invalid-args.rs:5:9 + | +5 | #[scope(PATH)] + | ^^^^ + +error: argument to scope macro is not a string literal, expected: #[scope("/prefix")] + --> tests/trybuild/scope-invalid-args.rs:8:9 + | +8 | #[scope(true)] + | ^^^^ + +error: argument to scope macro is not a string literal, expected: #[scope("/prefix")] + --> tests/trybuild/scope-invalid-args.rs:11:9 + | +11 | #[scope(123)] + | ^^^ diff --git a/actix-web-codegen/tests/trybuild/scope-missing-args.rs b/actix-web-codegen/tests/trybuild/scope-missing-args.rs new file mode 100644 index 000000000..39bcb9d1a --- /dev/null +++ b/actix-web-codegen/tests/trybuild/scope-missing-args.rs @@ -0,0 +1,6 @@ +use actix_web_codegen::scope; + +#[scope] +mod api {} + +fn main() {} diff --git a/actix-web-codegen/tests/trybuild/scope-missing-args.stderr b/actix-web-codegen/tests/trybuild/scope-missing-args.stderr new file mode 100644 index 000000000..d59842e39 --- /dev/null +++ b/actix-web-codegen/tests/trybuild/scope-missing-args.stderr @@ -0,0 +1,7 @@ +error: missing arguments for scope macro, expected: #[scope("/prefix")] + --> tests/trybuild/scope-missing-args.rs:3:1 + | +3 | #[scope] + | ^^^^^^^^ + | + = note: this error originates in the attribute macro `scope` (in Nightly builds, run with -Z macro-backtrace for more info) diff --git a/actix-web-codegen/tests/trybuild/scope-on-handler.rs b/actix-web-codegen/tests/trybuild/scope-on-handler.rs new file mode 100644 index 000000000..e5d478981 --- /dev/null +++ b/actix-web-codegen/tests/trybuild/scope-on-handler.rs @@ -0,0 +1,8 @@ +use actix_web_codegen::scope; + +#[scope("/api")] +async fn index() -> &'static str { + "Hello World!" 
+} + +fn main() {} diff --git a/actix-web-codegen/tests/trybuild/scope-on-handler.stderr b/actix-web-codegen/tests/trybuild/scope-on-handler.stderr new file mode 100644 index 000000000..4491f42dd --- /dev/null +++ b/actix-web-codegen/tests/trybuild/scope-on-handler.stderr @@ -0,0 +1,5 @@ +error: #[scope] macro must be attached to a module + --> tests/trybuild/scope-on-handler.rs:4:1 + | +4 | async fn index() -> &'static str { + | ^^^^^ diff --git a/actix-web-codegen/tests/trybuild/scope-trailing-slash.rs b/actix-web-codegen/tests/trybuild/scope-trailing-slash.rs new file mode 100644 index 000000000..84632b59f --- /dev/null +++ b/actix-web-codegen/tests/trybuild/scope-trailing-slash.rs @@ -0,0 +1,6 @@ +use actix_web_codegen::scope; + +#[scope("/api/")] +mod api {} + +fn main() {} diff --git a/actix-web-codegen/tests/trybuild/scope-trailing-slash.stderr b/actix-web-codegen/tests/trybuild/scope-trailing-slash.stderr new file mode 100644 index 000000000..66933432e --- /dev/null +++ b/actix-web-codegen/tests/trybuild/scope-trailing-slash.stderr @@ -0,0 +1,5 @@ +error: scopes should not have trailing slashes; see https://docs.rs/actix-web/4/actix_web/struct.Scope.html#avoid-trailing-slashes + --> tests/trybuild/scope-trailing-slash.rs:3:9 + | +3 | #[scope("/api/")] + | ^^^^^^^ diff --git a/actix-web/CHANGES.md b/actix-web/CHANGES.md index 953befb7f..d26859a36 100644 --- a/actix-web/CHANGES.md +++ b/actix-web/CHANGES.md @@ -2,6 +2,69 @@ ## Unreleased +### Added + +- Add `middleware::from_fn()` helper. +- Add `web::ThinData` extractor. + +## 4.8.0 + +### Added + +- Add `web::Html` responder. +- Add `HttpRequest::full_url()` method to get the complete URL of the request. + +### Fixed + +- Always remove port from return value of `ConnectionInfo::realip_remote_addr()` when handling IPv6 addresses. from the `Forwarded` header. +- The `UrlencodedError::ContentType` variant (relevant to the `Form` extractor) now uses the 415 (Media Type Unsupported) status code in it's `ResponseError` implementation. +- Apply `HttpServer::max_connection_rate()` setting when using rustls v0.22 or v0.23. + +## 4.7.0 + +### Added + +- Add `#[scope]` macro. +- Add `middleware::Identity` type. +- Add `CustomizeResponder::add_cookie()` method. +- Add `guard::GuardContext::app_data()` method. +- Add `compat-routing-macros-force-pub` crate feature which (on-by-default) which, when disabled, causes handlers to inherit their attached function's visibility. +- Add `compat` crate feature group (on-by-default) which, when disabled, helps with transitioning to some planned v5.0 breaking changes, starting only with `compat-routing-macros-force-pub`. +- Implement `From>` for `Error`. + +## 4.6.0 + +### Added + +- Add `unicode` crate feature (on-by-default) to switch between `regex` and `regex-lite` as a trade-off between full unicode support and binary size. +- Add `rustls-0_23` crate feature. +- Add `HttpServer::{bind_rustls_0_23, listen_rustls_0_23}()` builder methods. +- Add `HttpServer::tls_handshake_timeout()` builder method for `rustls-0_22` and `rustls-0_23`. + +### Changed + +- Update `brotli` dependency to `6`. +- Minimum supported Rust version (MSRV) is now 1.72. + +### Fixed + +- Avoid type confusion with `rustls` in some circumstances. + +## 4.5.1 + +### Fixed + +- Fix missing import when using enabling Rustls v0.22 support. + +## 4.5.0 + +### Added + +- Add `rustls-0_22` crate feature. +- Add `HttpServer::{bind_rustls_0_22, listen_rustls_0_22}()` builder methods. 
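An illustrative, hedged sketch (not part of the changelog itself) of the `guard::GuardContext::app_data()` addition listed under 4.7.0 above. The `ApiFlags` type and `/beta` path are invented for the example; the call pattern mirrors the guard test added elsewhere in this patch.

```rust
use actix_web::{guard::fn_guard, web, HttpResponse, Resource};

// Hypothetical app-data type, registered elsewhere with `App::app_data(ApiFlags { .. })`.
struct ApiFlags {
    beta_enabled: bool,
}

fn beta_route() -> Resource {
    web::resource("/beta")
        // `GuardContext::app_data()` lets the guard read data registered on the App
        .guard(fn_guard(|ctx| {
            ctx.app_data::<ApiFlags>()
                .map_or(false, |flags| flags.beta_enabled)
        }))
        .route(web::get().to(HttpResponse::Ok))
}
```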
+ +## 4.4.1 + ### Changed - Updated `zstd` dependency to `0.13`. diff --git a/actix-web/Cargo.toml b/actix-web/Cargo.toml index d9cf0b94f..f27a5a5b6 100644 --- a/actix-web/Cargo.toml +++ b/actix-web/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "actix-web" -version = "4.4.0" +version = "4.8.0" description = "Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust" authors = [ "Nikolay Kim ", @@ -20,16 +20,58 @@ edition.workspace = true rust-version.workspace = true [package.metadata.docs.rs] -# features that docs.rs will build with -features = ["macros", "openssl", "rustls-0_20", "rustls-0_21", "compress-brotli", "compress-gzip", "compress-zstd", "cookies", "secure-cookies"] rustdoc-args = ["--cfg", "docsrs"] +features = [ + "macros", + "openssl", + "rustls-0_20", + "rustls-0_21", + "rustls-0_22", + "rustls-0_23", + "compress-brotli", + "compress-gzip", + "compress-zstd", + "cookies", + "secure-cookies", +] -[lib] -name = "actix_web" -path = "src/lib.rs" +[package.metadata.cargo_check_external_types] +allowed_external_types = [ + "actix_http::*", + "actix_router::*", + "actix_rt::*", + "actix_server::*", + "actix_service::*", + "actix_utils::*", + "actix_web_codegen::*", + "bytes::*", + "cookie::*", + "cookie", + "futures_core::*", + "http::*", + "language_tags::*", + "mime::*", + "openssl::*", + "rustls::*", + "serde_json::*", + "serde_urlencoded::*", + "serde::*", + "serde::*", + "tokio::*", + "url::*", +] [features] -default = ["macros", "compress-brotli", "compress-gzip", "compress-zstd", "cookies", "http2"] +default = [ + "macros", + "compress-brotli", + "compress-gzip", + "compress-zstd", + "cookies", + "http2", + "unicode", + "compat", +] # Brotli algorithm content-encoding support compress-brotli = ["actix-http/compress-brotli", "__compress"] @@ -39,33 +81,53 @@ compress-gzip = ["actix-http/compress-gzip", "__compress"] compress-zstd = ["actix-http/compress-zstd", "__compress"] # Routing and runtime proc macros -macros = ["actix-macros", "actix-web-codegen"] +macros = ["dep:actix-macros", "dep:actix-web-codegen"] # Cookies support -cookies = ["cookie"] +cookies = ["dep:cookie"] # Secure & signed cookies secure-cookies = ["cookies", "cookie/secure"] +# HTTP/2 support (including h2c). http2 = ["actix-http/http2"] # TLS via OpenSSL -openssl = ["http2", "actix-http/openssl", "actix-tls/accept", "actix-tls/openssl"] +openssl = ["__tls", "http2", "actix-http/openssl", "actix-tls/accept", "actix-tls/openssl"] # TLS via Rustls v0.20 rustls = ["rustls-0_20"] # TLS via Rustls v0.20 -rustls-0_20 = ["http2", "actix-http/rustls-0_20", "actix-tls/accept", "actix-tls/rustls-0_20"] +rustls-0_20 = ["__tls", "http2", "actix-http/rustls-0_20", "actix-tls/accept", "actix-tls/rustls-0_20"] # TLS via Rustls v0.21 -rustls-0_21 = ["http2", "actix-http/rustls-0_21", "actix-tls/accept", "actix-tls/rustls-0_21"] +rustls-0_21 = ["__tls", "http2", "actix-http/rustls-0_21", "actix-tls/accept", "actix-tls/rustls-0_21"] +# TLS via Rustls v0.22 +rustls-0_22 = ["__tls", "http2", "actix-http/rustls-0_22", "actix-tls/accept", "actix-tls/rustls-0_22"] +# TLS via Rustls v0.23 +rustls-0_23 = ["__tls", "http2", "actix-http/rustls-0_23", "actix-tls/accept", "actix-tls/rustls-0_23"] + +# Full unicode support +unicode = ["dep:regex", "actix-router/unicode"] # Internal (PRIVATE!) features used to aid testing and checking feature status. # Don't rely on these whatsoever. They may disappear at anytime. __compress = [] +# Internal (PRIVATE!) features used to aid checking feature status. 
+# Don't rely on these whatsoever. They may disappear at anytime. +__tls = [] + # io-uring feature only available for Linux OSes. experimental-io-uring = ["actix-server/io-uring"] +# Feature group which, when disabled, helps migrate code to v5.0. +compat = [ + "compat-routing-macros-force-pub", +] + +# Opt-out forwards-compatibility for handler visibility inheritance fix. +compat-routing-macros-force-pub = ["actix-web-codegen?/compat-routing-macros-force-pub"] + [dependencies] actix-codec = "0.5" actix-macros = { version = "0.2.3", optional = true } @@ -73,11 +135,11 @@ actix-rt = { version = "2.6", default-features = false } actix-server = "2" actix-service = "2" actix-utils = "3" -actix-tls = { version = "3.1", default-features = false, optional = true } +actix-tls = { version = "3.4", default-features = false, optional = true } -actix-http = { version = "3.4", features = ["ws"] } -actix-router = "0.5" -actix-web-codegen = { version = "4.2", optional = true } +actix-http = { version = "3.7", features = ["ws"] } +actix-router = { version = "0.5.3", default-features = false, features = ["http"] } +actix-web-codegen = { version = "4.3", optional = true, default-features = false } ahash = "0.8" bytes = "1" @@ -89,12 +151,14 @@ encoding_rs = "0.8" futures-core = { version = "0.3.17", default-features = false } futures-util = { version = "0.3.17", default-features = false } itoa = "1" +impl-more = "0.1.4" language-tags = "0.3" log = "0.4" mime = "0.3" once_cell = "1.5" pin-project-lite = "0.2.7" -regex = "1.5.5" +regex = { version = "1.5.5", optional = true } +regex-lite = "0.1" serde = "1.0" serde_json = "1.0" serde_urlencoded = "0.7" @@ -105,25 +169,29 @@ url = "2.1" [dev-dependencies] actix-files = "0.6" -actix-test = { version = "0.1", features = ["openssl", "rustls-0_21"] } +actix-test = { version = "0.1", features = ["openssl", "rustls-0_23"] } awc = { version = "3", features = ["openssl"] } -brotli = "3.3.3" +brotli = "6" const-str = "0.5" +core_affinity = "0.8" criterion = { version = "0.5", features = ["html_reports"] } -env_logger = "0.10" +env_logger = "0.11" flate2 = "1.0.13" futures-util = { version = "0.3.17", default-features = false, features = ["std"] } rand = "0.8" -rcgen = "0.11" -rustls-pemfile = "1" +rcgen = "0.13" +rustls-pemfile = "2" serde = { version = "1.0", features = ["derive"] } static_assertions = "1" tls-openssl = { package = "openssl", version = "0.10.55" } -tls-rustls = { package = "rustls", version = "0.21" } +tls-rustls = { package = "rustls", version = "0.23" } tokio = { version = "1.24.2", features = ["rt-multi-thread", "macros"] } zstd = "0.13" +[lints] +workspace = true + [[test]] name = "test_server" required-features = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"] diff --git a/actix-web/MIGRATION-4.0.md b/actix-web/MIGRATION-4.0.md index 0f0bd3a53..08c89635a 100644 --- a/actix-web/MIGRATION-4.0.md +++ b/actix-web/MIGRATION-4.0.md @@ -372,13 +372,13 @@ You may need to review the [guidance on shared mutable state](https://docs.rs/ac HttpServer::new(|| { - App::new() - .data(MyState::default()) -- .service(hander) +- .service(handler) + let my_state: Data = Data::new(MyState::default()); + + App::new() + .app_data(my_state) -+ .service(hander) ++ .service(handler) }) ``` diff --git a/actix-web/README.md b/actix-web/README.md index e83397657..d30d945a4 100644 --- a/actix-web/README.md +++ b/actix-web/README.md @@ -8,10 +8,10 @@ [![crates.io](https://img.shields.io/crates/v/actix-web?label=latest)](https://crates.io/crates/actix-web) 
-[![Documentation](https://docs.rs/actix-web/badge.svg?version=4.4.0)](https://docs.rs/actix-web/4.4.0)
-![MSRV](https://img.shields.io/badge/rustc-1.68+-ab6000.svg)
+[![Documentation](https://docs.rs/actix-web/badge.svg?version=4.8.0)](https://docs.rs/actix-web/4.8.0)
+![MSRV](https://img.shields.io/badge/rustc-1.72+-ab6000.svg)
 ![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-web.svg)
-[![Dependency Status](https://deps.rs/crate/actix-web/4.4.0/status.svg)](https://deps.rs/crate/actix-web/4.4.0)
+[![Dependency Status](https://deps.rs/crate/actix-web/4.8.0/status.svg)](https://deps.rs/crate/actix-web/4.8.0)
[![CI](https://github.com/actix/actix-web/actions/workflows/ci.yml/badge.svg)](https://github.com/actix/actix-web/actions/workflows/ci.yml) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) @@ -37,7 +37,7 @@ - SSL support using OpenSSL or Rustls - Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/)) - Integrates with the [`awc` HTTP client](https://docs.rs/awc/) -- Runs on stable Rust 1.68+ +- Runs on stable Rust 1.72+ ## Documentation @@ -109,4 +109,4 @@ This project is licensed under either of the following licenses, at your option: ## Code of Conduct -Contribution to the actix-web repo is organized under the terms of the Contributor Covenant. The Actix team promises to intervene to uphold that code of conduct. +Contribution to the `actix/actix-web` repo is organized under the terms of the Contributor Covenant. The Actix team promises to intervene to uphold that code of conduct. diff --git a/actix-web/benches/responder.rs b/actix-web/benches/responder.rs index c675eadff..489515e40 100644 --- a/actix-web/benches/responder.rs +++ b/actix-web/benches/responder.rs @@ -2,11 +2,9 @@ use std::{future::Future, time::Instant}; use actix_http::body::BoxBody; use actix_utils::future::{ready, Ready}; -use actix_web::{ - error, http::StatusCode, test::TestRequest, Error, HttpRequest, HttpResponse, Responder, -}; +use actix_web::{http::StatusCode, test::TestRequest, Error, HttpRequest, HttpResponse, Responder}; use criterion::{criterion_group, criterion_main, Criterion}; -use futures_util::future::{join_all, Either}; +use futures_util::future::join_all; // responder simulate the old responder trait. trait FutureResponder { @@ -16,9 +14,6 @@ trait FutureResponder { fn future_respond_to(self, req: &HttpRequest) -> Self::Future; } -// a simple option responder type. -struct OptionResponder(Option); - // a simple wrapper type around string struct StringResponder(String); @@ -34,22 +29,6 @@ impl FutureResponder for StringResponder { } } -impl FutureResponder for OptionResponder -where - T: FutureResponder, - T::Future: Future>, -{ - type Error = Error; - type Future = Either>>; - - fn future_respond_to(self, req: &HttpRequest) -> Self::Future { - match self.0 { - Some(t) => Either::Left(t.future_respond_to(req)), - None => Either::Right(ready(Err(error::ErrorInternalServerError("err")))), - } - } -} - impl Responder for StringResponder { type Body = BoxBody; @@ -60,17 +39,6 @@ impl Responder for StringResponder { } } -impl Responder for OptionResponder { - type Body = BoxBody; - - fn respond_to(self, req: &HttpRequest) -> HttpResponse { - match self.0 { - Some(t) => t.respond_to(req).map_into_boxed_body(), - None => HttpResponse::from_error(error::ErrorInternalServerError("err")), - } - } -} - fn future_responder(c: &mut Criterion) { let rt = actix_rt::System::new(); let req = TestRequest::default().to_http_request(); diff --git a/actix-web/examples/middleware_from_fn.rs b/actix-web/examples/middleware_from_fn.rs new file mode 100644 index 000000000..da92ef05b --- /dev/null +++ b/actix-web/examples/middleware_from_fn.rs @@ -0,0 +1,127 @@ +//! Shows a couple of ways to use the `from_fn` middleware. 
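//! (Editorial note, hedged) With the file added as `actix-web/examples/middleware_from_fn.rs`,
//! it should be runnable from the `actix-web` package directory with
//! `cargo run --example middleware_from_fn`; it binds to 127.0.0.1:8080 as set up in `main` below.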
+ +use std::{collections::HashMap, io, rc::Rc, time::Duration}; + +use actix_web::{ + body::MessageBody, + dev::{Service, ServiceRequest, ServiceResponse, Transform}, + http::header::{self, HeaderValue, Range}, + middleware::{from_fn, Logger, Next}, + web::{self, Header, Query}, + App, Error, HttpResponse, HttpServer, +}; + +async fn noop(req: ServiceRequest, next: Next) -> Result, Error> { + next.call(req).await +} + +async fn print_range_header( + range_header: Option>, + req: ServiceRequest, + next: Next, +) -> Result, Error> { + if let Some(Header(range)) = range_header { + println!("Range: {range}"); + } else { + println!("No Range header"); + } + + next.call(req).await +} + +async fn mutate_body_type( + req: ServiceRequest, + next: Next, +) -> Result, Error> { + let res = next.call(req).await?; + Ok(res.map_into_left_body::<()>()) +} + +async fn mutate_body_type_with_extractors( + string_body: String, + query: Query>, + req: ServiceRequest, + next: Next, +) -> Result, Error> { + println!("body is: {string_body}"); + println!("query string: {query:?}"); + + let res = next.call(req).await?; + + Ok(res.map_body(move |_, _| string_body)) +} + +async fn timeout_10secs( + req: ServiceRequest, + next: Next, +) -> Result, Error> { + match tokio::time::timeout(Duration::from_secs(10), next.call(req)).await { + Ok(res) => res, + Err(_err) => Err(actix_web::error::ErrorRequestTimeout("")), + } +} + +struct MyMw(bool); + +impl MyMw { + async fn mw_cb( + &self, + req: ServiceRequest, + next: Next, + ) -> Result, Error> { + let mut res = match self.0 { + true => req.into_response("short-circuited").map_into_right_body(), + false => next.call(req).await?.map_into_left_body(), + }; + + res.headers_mut() + .insert(header::WARNING, HeaderValue::from_static("42")); + + Ok(res) + } + + pub fn into_middleware( + self, + ) -> impl Transform< + S, + ServiceRequest, + Response = ServiceResponse, + Error = Error, + InitError = (), + > + where + S: Service, Error = Error> + 'static, + B: MessageBody + 'static, + { + let this = Rc::new(self); + from_fn(move |req, next| { + let this = Rc::clone(&this); + async move { Self::mw_cb(&this, req, next).await } + }) + } +} + +#[actix_web::main] +async fn main() -> io::Result<()> { + env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); + + let bind = ("127.0.0.1", 8080); + log::info!("staring server at http://{}:{}", &bind.0, &bind.1); + + HttpServer::new(|| { + App::new() + .wrap(from_fn(noop)) + .wrap(from_fn(print_range_header)) + .wrap(from_fn(mutate_body_type)) + .wrap(from_fn(mutate_body_type_with_extractors)) + .wrap(from_fn(timeout_10secs)) + // switch bool to true to observe early response + .wrap(MyMw(false).into_middleware()) + .wrap(Logger::default()) + .default_service(web::to(HttpResponse::Ok)) + }) + .workers(1) + .bind(bind)? + .run() + .await +} diff --git a/actix-web/examples/worker-cpu-pin.rs b/actix-web/examples/worker-cpu-pin.rs new file mode 100644 index 000000000..58e060821 --- /dev/null +++ b/actix-web/examples/worker-cpu-pin.rs @@ -0,0 +1,41 @@ +use std::{ + io, + sync::{ + atomic::{AtomicUsize, Ordering}, + Arc, + }, + thread, +}; + +use actix_web::{middleware, web, App, HttpServer}; + +async fn hello() -> &'static str { + "Hello world!" 
+} + +#[actix_web::main] +async fn main() -> io::Result<()> { + env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); + + let core_ids = core_affinity::get_core_ids().unwrap(); + let n_core_ids = core_ids.len(); + let next_core_id = Arc::new(AtomicUsize::new(0)); + + HttpServer::new(move || { + let pin = Arc::clone(&next_core_id).fetch_add(1, Ordering::AcqRel); + log::info!( + "setting CPU affinity for worker {}: pinning to core {}", + thread::current().name().unwrap(), + pin, + ); + core_affinity::set_for_current(core_ids[pin]); + + App::new() + .wrap(middleware::Logger::default()) + .service(web::resource("/").get(hello)) + }) + .bind(("127.0.0.1", 8080))? + .workers(n_core_ids) + .run() + .await +} diff --git a/actix-web/src/app.rs b/actix-web/src/app.rs index 06b66f62c..240e5b982 100644 --- a/actix-web/src/app.rs +++ b/actix-web/src/app.rs @@ -39,7 +39,7 @@ impl App { let factory_ref = Rc::new(RefCell::new(None)); App { - endpoint: AppEntry::new(factory_ref.clone()), + endpoint: AppEntry::new(Rc::clone(&factory_ref)), data_factories: Vec::new(), services: Vec::new(), default: None, @@ -112,8 +112,8 @@ where /// }) /// ``` #[doc(alias = "manage")] - pub fn app_data(mut self, ext: U) -> Self { - self.extensions.insert(ext); + pub fn app_data(mut self, data: U) -> Self { + self.extensions.insert(data); self } @@ -129,6 +129,8 @@ where /// /// Data items are constructed during application initialization, before the server starts /// accepting requests. + /// + /// The returned data value `D` is wrapped as [`Data`]. pub fn data_factory(mut self, data: F) -> Self where F: Fn() -> Out + 'static, @@ -232,7 +234,6 @@ where /// /// * *Resource* is an entry in resource table which corresponds to requested URL. /// * *Scope* is a set of resources with common root path. - /// * "StaticFiles" is a service for static files support pub fn service(mut self, factory: F) -> Self where F: HttpServiceFactory + 'static, @@ -469,7 +470,6 @@ mod tests { Method, StatusCode, }, middleware::DefaultHeaders, - service::ServiceRequest, test::{call_service, init_service, read_body, try_init_service, TestRequest}, web, HttpRequest, HttpResponse, }; diff --git a/actix-web/src/app_service.rs b/actix-web/src/app_service.rs index f2dca954c..7aa16b790 100644 --- a/actix-web/src/app_service.rs +++ b/actix-web/src/app_service.rs @@ -71,7 +71,7 @@ where }); // create App config to pass to child services - let mut config = AppService::new(config, default.clone()); + let mut config = AppService::new(config, Rc::clone(&default)); // register services mem::take(&mut *self.services.borrow_mut()) @@ -263,8 +263,9 @@ impl ServiceFactory for AppRoutingFactory { let guards = guards.borrow_mut().take().unwrap_or_default(); let factory_fut = factory.new_service(()); async move { - let service = factory_fut.await?; - Ok((path, guards, service)) + factory_fut + .await + .map(move |service| (path, guards, service)) } })); diff --git a/actix-web/src/config.rs b/actix-web/src/config.rs index fba0c2717..0e856f574 100644 --- a/actix-web/src/config.rs +++ b/actix-web/src/config.rs @@ -68,7 +68,7 @@ impl AppService { pub(crate) fn clone_config(&self) -> Self { AppService { config: self.config.clone(), - default: self.default.clone(), + default: Rc::clone(&self.default), services: Vec::new(), root: false, } @@ -81,7 +81,7 @@ impl AppService { /// Returns default handler factory. pub fn default_service(&self) -> Rc { - self.default.clone() + Rc::clone(&self.default) } /// Register HTTP service. 
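// Illustrative sketch (not part of this patch): the recurring `x.clone()` -> `Rc::clone(&x)`
// rewrites in this patch are purely stylistic; both forms only bump the reference count. The
// fully qualified form makes "cheap pointer copy, not a deep clone" explicit at the call site.
fn rc_clone_is_shallow() {
    use std::rc::Rc;

    let default_service = Rc::new(String::from("default handler"));
    let shared = Rc::clone(&default_service); // same allocation; strong count goes 1 -> 2
    assert!(Rc::ptr_eq(&default_service, &shared));
    assert_eq!(Rc::strong_count(&default_service), 2);
}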
@@ -148,7 +148,7 @@ impl AppConfig { #[cfg(test)] pub(crate) fn set_host(&mut self, host: &str) { - self.host = host.to_owned(); + host.clone_into(&mut self.host); } } diff --git a/actix-web/src/data.rs b/actix-web/src/data.rs index ebb98af3f..088df55d2 100644 --- a/actix-web/src/data.rs +++ b/actix-web/src/data.rs @@ -69,7 +69,7 @@ pub(crate) type FnDataFactory = /// HttpResponse::Ok() /// } /// -/// /// Alteratively, use the `HttpRequest::app_data` method to access data in a handler. +/// /// Alternatively, use the `HttpRequest::app_data` method to access data in a handler. /// async fn index_alt(req: HttpRequest) -> impl Responder { /// let data = req.app_data::>>().unwrap(); /// let mut my_data = data.lock().unwrap(); @@ -184,7 +184,7 @@ impl FromRequest for Data { impl DataFactory for Data { fn create(&self, extensions: &mut Extensions) -> bool { - extensions.insert(Data(self.0.clone())); + extensions.insert(Data(Arc::clone(&self.0))); true } } diff --git a/actix-web/src/error/error.rs b/actix-web/src/error/error.rs index 3a5a128f6..670a58a00 100644 --- a/actix-web/src/error/error.rs +++ b/actix-web/src/error/error.rs @@ -60,6 +60,12 @@ impl From for Error { } } +impl From> for Error { + fn from(value: Box) -> Self { + Error { cause: value } + } +} + impl From for Response { fn from(err: Error) -> Response { err.error_response().into() diff --git a/actix-web/src/error/mod.rs b/actix-web/src/error/mod.rs index 91a6bcc3f..25535332c 100644 --- a/actix-web/src/error/mod.rs +++ b/actix-web/src/error/mod.rs @@ -100,6 +100,7 @@ impl ResponseError for UrlencodedError { match self { Self::Overflow { .. } => StatusCode::PAYLOAD_TOO_LARGE, Self::UnknownLength => StatusCode::LENGTH_REQUIRED, + Self::ContentType => StatusCode::UNSUPPORTED_MEDIA_TYPE, Self::Payload(err) => err.status_code(), _ => StatusCode::BAD_REQUEST, } @@ -232,7 +233,7 @@ mod tests { let resp = UrlencodedError::UnknownLength.error_response(); assert_eq!(resp.status(), StatusCode::LENGTH_REQUIRED); let resp = UrlencodedError::ContentType.error_response(); - assert_eq!(resp.status(), StatusCode::BAD_REQUEST); + assert_eq!(resp.status(), StatusCode::UNSUPPORTED_MEDIA_TYPE); } #[test] diff --git a/actix-web/src/guard/acceptable.rs b/actix-web/src/guard/acceptable.rs index a31494a18..8fa7165c8 100644 --- a/actix-web/src/guard/acceptable.rs +++ b/actix-web/src/guard/acceptable.rs @@ -20,7 +20,7 @@ use crate::http::header::Accept; pub struct Acceptable { mime: mime::Mime, - /// Wether to match `*/*` mime type. + /// Whether to match `*/*` mime type. /// /// Defaults to false because it's not very useful otherwise. match_star_star: bool, diff --git a/actix-web/src/guard/host.rs b/actix-web/src/guard/host.rs index f05c81183..a971a3e30 100644 --- a/actix-web/src/guard/host.rs +++ b/actix-web/src/guard/host.rs @@ -2,7 +2,7 @@ use actix_http::{header, uri::Uri, RequestHead}; use super::{Guard, GuardContext}; -/// Creates a guard that matches requests targetting a specific host. +/// Creates a guard that matches requests targeting a specific host. /// /// # Matching Host /// This guard will: diff --git a/actix-web/src/guard/mod.rs b/actix-web/src/guard/mod.rs index 35294a3c4..41609953a 100644 --- a/actix-web/src/guard/mod.rs +++ b/actix-web/src/guard/mod.rs @@ -110,6 +110,12 @@ impl<'a> GuardContext<'a> { pub fn header(&self) -> Option { H::parse(self.req).ok() } + + /// Counterpart to [HttpRequest::app_data](crate::HttpRequest::app_data). 
+ #[inline] + pub fn app_data(&self) -> Option<&T> { + self.req.app_data() + } } /// Interface for routing guards. @@ -380,7 +386,7 @@ impl Guard for HeaderGuard { #[cfg(test)] mod tests { - use actix_http::{header, Method}; + use actix_http::Method; use super::*; use crate::test::TestRequest; @@ -512,4 +518,18 @@ mod tests { .to_srv_request(); assert!(guard.check(&req.guard_ctx())); } + + #[test] + fn app_data() { + const TEST_VALUE: u32 = 42; + let guard = fn_guard(|ctx| dbg!(ctx.app_data::()) == Some(&TEST_VALUE)); + + let req = TestRequest::default().app_data(TEST_VALUE).to_srv_request(); + assert!(guard.check(&req.guard_ctx())); + + let req = TestRequest::default() + .app_data(TEST_VALUE * 2) + .to_srv_request(); + assert!(!guard.check(&req.guard_ctx())); + } } diff --git a/actix-web/src/handler.rs b/actix-web/src/handler.rs index 6e4e2250a..10015cb69 100644 --- a/actix-web/src/handler.rs +++ b/actix-web/src/handler.rs @@ -19,7 +19,7 @@ use crate::{ /// 1. It is an async function (or a function/closure that returns an appropriate future); /// 1. The function parameters (up to 12) implement [`FromRequest`]; /// 1. The async function (or future) resolves to a type that can be converted into an -/// [`HttpResponse`] (i.e., it implements the [`Responder`] trait). +/// [`HttpResponse`] (i.e., it implements the [`Responder`] trait). /// /// /// # Compiler Errors diff --git a/actix-web/src/http/header/content_disposition.rs b/actix-web/src/http/header/content_disposition.rs index 0606f5aef..592fc9f6a 100644 --- a/actix-web/src/http/header/content_disposition.rs +++ b/actix-web/src/http/header/content_disposition.rs @@ -13,7 +13,10 @@ use std::fmt::{self, Write}; use once_cell::sync::Lazy; +#[cfg(feature = "unicode")] use regex::Regex; +#[cfg(not(feature = "unicode"))] +use regex_lite::Regex; use super::{ExtendedValue, Header, TryIntoHeaderValue, Writer}; use crate::http::header; @@ -151,7 +154,7 @@ impl DispositionParam { #[inline] pub fn as_name(&self) -> Option<&str> { match self { - DispositionParam::Name(ref name) => Some(name.as_str()), + DispositionParam::Name(name) => Some(name.as_str()), _ => None, } } @@ -160,7 +163,7 @@ impl DispositionParam { #[inline] pub fn as_filename(&self) -> Option<&str> { match self { - DispositionParam::Filename(ref filename) => Some(filename.as_str()), + DispositionParam::Filename(filename) => Some(filename.as_str()), _ => None, } } @@ -169,7 +172,7 @@ impl DispositionParam { #[inline] pub fn as_filename_ext(&self) -> Option<&ExtendedValue> { match self { - DispositionParam::FilenameExt(ref value) => Some(value), + DispositionParam::FilenameExt(value) => Some(value), _ => None, } } @@ -490,7 +493,7 @@ impl Header for ContentDisposition { } fn parse(msg: &T) -> Result { - if let Some(h) = msg.headers().get(&Self::name()) { + if let Some(h) = msg.headers().get(Self::name()) { Self::from_raw(h) } else { Err(crate::error::ParseError::Header) diff --git a/actix-web/src/http/header/content_length.rs b/actix-web/src/http/header/content_length.rs index ad16dc409..557c7c9f5 100644 --- a/actix-web/src/http/header/content_length.rs +++ b/actix-web/src/http/header/content_length.rs @@ -126,7 +126,7 @@ mod tests { use std::fmt; use super::*; - use crate::{http::header::Header, test::TestRequest, HttpRequest}; + use crate::{test::TestRequest, HttpRequest}; fn req_from_raw_headers, V: AsRef<[u8]>>( header_lines: I, diff --git a/actix-web/src/http/header/range.rs b/actix-web/src/http/header/range.rs index 2326bb19c..4a5d95d93 100644 --- 
a/actix-web/src/http/header/range.rs +++ b/actix-web/src/http/header/range.rs @@ -107,16 +107,16 @@ impl ByteRangeSpec { /// satisfiable if they meet the following conditions: /// /// > If a valid byte-range-set includes at least one byte-range-spec with a first-byte-pos that - /// is less than the current length of the representation, or at least one - /// suffix-byte-range-spec with a non-zero suffix-length, then the byte-range-set - /// is satisfiable. Otherwise, the byte-range-set is unsatisfiable. + /// > is less than the current length of the representation, or at least one + /// > suffix-byte-range-spec with a non-zero suffix-length, then the byte-range-set is + /// > satisfiable. Otherwise, the byte-range-set is unsatisfiable. /// /// The function also computes remainder ranges based on the RFC: /// /// > If the last-byte-pos value is absent, or if the value is greater than or equal to the - /// current length of the representation data, the byte range is interpreted as the remainder - /// of the representation (i.e., the server replaces the value of last-byte-pos with a value - /// that is one less than the current length of the selected representation). + /// > current length of the representation data, the byte range is interpreted as the remainder + /// > of the representation (i.e., the server replaces the value of last-byte-pos with a value + /// > that is one less than the current length of the selected representation). /// /// [RFC 7233 §2.1]: https://datatracker.ietf.org/doc/html/rfc7233 pub fn to_satisfiable_range(&self, full_length: u64) -> Option<(u64, u64)> { @@ -270,7 +270,7 @@ impl Header for Range { #[inline] fn parse(msg: &T) -> Result { - header::from_one_raw_str(msg.headers().get(&Self::name())) + header::from_one_raw_str(msg.headers().get(Self::name())) } } diff --git a/actix-web/src/info.rs b/actix-web/src/info.rs index c5d9638f4..1b2e554f9 100644 --- a/actix-web/src/info.rs +++ b/actix-web/src/info.rs @@ -21,6 +21,20 @@ fn unquote(val: &str) -> &str { val.trim().trim_start_matches('"').trim_end_matches('"') } +/// Remove port and IPv6 square brackets from a peer specification. +fn bare_address(val: &str) -> &str { + if val.starts_with('[') { + val.split("]:") + .next() + .map(|s| s.trim_start_matches('[').trim_end_matches(']')) + // this indicates that the IPv6 address is malformed so shouldn't + // usually happen, but if it does, just return the original input + .unwrap_or(val) + } else { + val.split(':').next().unwrap_or(val) + } +} + /// Extracts and trims first value for given header name. 
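// Illustrative sketch (not part of this patch): the behaviour expected from `bare_address`,
// mirroring the `Forwarded` header tests further down in this patch: ports and IPv6 square
// brackets are stripped before the value is reported as the real client IP.
#[cfg(test)]
mod bare_address_sketch {
    use super::bare_address;

    #[test]
    fn strips_port_and_brackets() {
        assert_eq!(bare_address("192.0.2.60:8080"), "192.0.2.60");
        assert_eq!(bare_address("198.51.100.7"), "198.51.100.7");
        assert_eq!(bare_address("[2001:db8:cafe::17]"), "2001:db8:cafe::17");
        assert_eq!(bare_address("[2001:db8:cafe::17]:4711"), "2001:db8:cafe::17");
    }
}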
fn first_header_value<'a>(req: &'a RequestHead, name: &'_ HeaderName) -> Option<&'a str> { let hdr = req.headers.get(name)?.to_str().ok()?; @@ -100,7 +114,7 @@ impl ConnectionInfo { // --- https://datatracker.ietf.org/doc/html/rfc7239#section-5.2 match name.trim().to_lowercase().as_str() { - "for" => realip_remote_addr.get_or_insert_with(|| unquote(val)), + "for" => realip_remote_addr.get_or_insert_with(|| bare_address(unquote(val))), "proto" => scheme.get_or_insert_with(|| unquote(val)), "host" => host.get_or_insert_with(|| unquote(val)), "by" => { @@ -368,16 +382,25 @@ mod tests { .insert_header((header::FORWARDED, r#"for="192.0.2.60:8080""#)) .to_http_request(); let info = req.connection_info(); - assert_eq!(info.realip_remote_addr(), Some("192.0.2.60:8080")); + assert_eq!(info.realip_remote_addr(), Some("192.0.2.60")); } #[test] fn forwarded_for_ipv6() { + let req = TestRequest::default() + .insert_header((header::FORWARDED, r#"for="[2001:db8:cafe::17]""#)) + .to_http_request(); + let info = req.connection_info(); + assert_eq!(info.realip_remote_addr(), Some("2001:db8:cafe::17")); + } + + #[test] + fn forwarded_for_ipv6_with_port() { let req = TestRequest::default() .insert_header((header::FORWARDED, r#"for="[2001:db8:cafe::17]:4711""#)) .to_http_request(); let info = req.connection_info(); - assert_eq!(info.realip_remote_addr(), Some("[2001:db8:cafe::17]:4711")); + assert_eq!(info.realip_remote_addr(), Some("2001:db8:cafe::17")); } #[test] diff --git a/actix-web/src/lib.rs b/actix-web/src/lib.rs index 88f0ae9be..e2a8e2275 100644 --- a/actix-web/src/lib.rs +++ b/actix-web/src/lib.rs @@ -64,11 +64,12 @@ //! - `compress-gzip` - gzip and deflate content encoding compression support (enabled by default) //! - `compress-zstd` - zstd content encoding compression support (enabled by default) //! - `openssl` - HTTPS support via `openssl` crate, supports `HTTP/2` -//! - `rustls` - HTTPS support via `rustls` crate, supports `HTTP/2` +//! - `rustls` - HTTPS support via `rustls` 0.20 crate, supports `HTTP/2` +//! - `rustls-0_21` - HTTPS support via `rustls` 0.21 crate, supports `HTTP/2` +//! - `rustls-0_22` - HTTPS support via `rustls` 0.22 crate, supports `HTTP/2` +//! - `rustls-0_23` - HTTPS support via `rustls` 0.23 crate, supports `HTTP/2` //! - `secure-cookies` - secure cookies support -#![deny(rust_2018_idioms, nonstandard_style)] -#![warn(future_incompatible)] #![doc(html_logo_url = "https://actix.rs/img/logo.png")] #![doc(html_favicon_url = "https://actix.rs/favicon.ico")] #![cfg_attr(docsrs, feature(doc_auto_cfg))] @@ -103,6 +104,7 @@ mod scope; mod server; mod service; pub mod test; +mod thin_data; pub(crate) mod types; pub mod web; @@ -142,5 +144,6 @@ codegen_reexport!(delete); codegen_reexport!(trace); codegen_reexport!(connect); codegen_reexport!(options); +codegen_reexport!(scope); pub(crate) type BoxError = Box; diff --git a/actix-web/src/middleware/compat.rs b/actix-web/src/middleware/compat.rs index 7df510a5c..963dfdabb 100644 --- a/actix-web/src/middleware/compat.rs +++ b/actix-web/src/middleware/compat.rs @@ -38,15 +38,6 @@ pub struct Compat { transform: T, } -#[cfg(test)] -impl Compat { - pub(crate) fn noop() -> Self { - Self { - transform: super::Noop, - } - } -} - impl Compat { /// Wrap a middleware to give it broader compatibility. 
pub fn new(middleware: T) -> Self { @@ -152,7 +143,7 @@ mod tests { use crate::{ dev::ServiceRequest, http::StatusCode, - middleware::{self, Condition, Logger}, + middleware::{self, Condition, Identity, Logger}, test::{self, call_service, init_service, TestRequest}, web, App, HttpResponse, }; @@ -225,7 +216,7 @@ mod tests { async fn compat_noop_is_noop() { let srv = test::ok_service(); - let mw = Compat::noop() + let mw = Compat::new(Identity) .new_transform(srv.into_service()) .await .unwrap(); diff --git a/actix-web/src/middleware/compress.rs b/actix-web/src/middleware/compress.rs index 8ff518cd3..943868d21 100644 --- a/actix-web/src/middleware/compress.rs +++ b/actix-web/src/middleware/compress.rs @@ -33,7 +33,7 @@ use crate::{ /// considered in this selection process. /// /// # Pre-compressed Payload -/// If you are serving some data is already using a compressed representation (e.g., a gzip +/// If you are serving some data that is already using a compressed representation (e.g., a gzip /// compressed HTML file from disk) you can signal this to `Compress` by setting an appropriate /// `Content-Encoding` header. In addition to preventing double compressing the payload, this header /// is required by the spec when using compressed representations and will inform the client that diff --git a/actix-web/src/middleware/condition.rs b/actix-web/src/middleware/condition.rs index 5e106c11f..5ee4467d9 100644 --- a/actix-web/src/middleware/condition.rs +++ b/actix-web/src/middleware/condition.rs @@ -135,13 +135,13 @@ mod tests { use super::*; use crate::{ body::BoxBody, - dev::{ServiceRequest, ServiceResponse}, + dev::ServiceRequest, error::Result, http::{ header::{HeaderValue, CONTENT_TYPE}, StatusCode, }, - middleware::{self, ErrorHandlerResponse, ErrorHandlers}, + middleware::{self, ErrorHandlerResponse, ErrorHandlers, Identity}, test::{self, TestRequest}, web::Bytes, HttpResponse, @@ -158,7 +158,7 @@ mod tests { #[test] fn compat_with_builtin_middleware() { - let _ = Condition::new(true, middleware::Compat::noop()); + let _ = Condition::new(true, middleware::Compat::new(Identity)); let _ = Condition::new(true, middleware::Logger::default()); let _ = Condition::new(true, middleware::Compress::default()); let _ = Condition::new(true, middleware::NormalizePath::trim()); diff --git a/actix-web/src/middleware/default_headers.rs b/actix-web/src/middleware/default_headers.rs index b5a5a6998..2669a047e 100644 --- a/actix-web/src/middleware/default_headers.rs +++ b/actix-web/src/middleware/default_headers.rs @@ -141,7 +141,7 @@ where actix_service::forward_ready!(service); fn call(&self, req: ServiceRequest) -> Self::Future { - let inner = self.inner.clone(); + let inner = Rc::clone(&self.inner); let fut = self.service.call(req); DefaultHeaderFuture { @@ -190,8 +190,6 @@ mod tests { use super::*; use crate::{ - dev::ServiceRequest, - http::header::CONTENT_TYPE, test::{self, TestRequest}, HttpResponse, }; diff --git a/actix-web/src/middleware/err_handlers.rs b/actix-web/src/middleware/err_handlers.rs index e640bba08..3c50d5c8f 100644 --- a/actix-web/src/middleware/err_handlers.rs +++ b/actix-web/src/middleware/err_handlers.rs @@ -220,16 +220,20 @@ impl ErrorHandlers { /// [`.handler()`][ErrorHandlers::handler]) will fall back on this. 
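// Illustrative sketch (not part of this patch): a catch-all handler suitable for
// `default_handler`, following the usual `ErrorHandlers` pattern of mapping the body into the
// left variant. The header value chosen here is arbitrary.
use actix_web::{
    dev::ServiceResponse,
    http::header::{HeaderValue, CONTENT_TYPE},
    middleware::ErrorHandlerResponse,
    Result,
};

fn add_error_header<B>(mut res: ServiceResponse<B>) -> Result<ErrorHandlerResponse<B>> {
    res.response_mut()
        .headers_mut()
        .insert(CONTENT_TYPE, HeaderValue::from_static("text/plain; charset=utf-8"));
    Ok(ErrorHandlerResponse::Response(res.map_into_left_body()))
}
// Attached as: App::new().wrap(ErrorHandlers::new().default_handler(add_error_header))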
/// /// Note that this will overwrite any default handlers previously set by calling - /// [`.default_handler_client()`][ErrorHandlers::default_handler_client] or - /// [`.default_handler_server()`][ErrorHandlers::default_handler_server], but not any set by - /// calling [`.handler()`][ErrorHandlers::handler]. + /// [`default_handler_client()`] or [`.default_handler_server()`], but not any set by calling + /// [`.handler()`]. + /// + /// [`default_handler_client()`]: ErrorHandlers::default_handler_client + /// [`.default_handler_server()`]: ErrorHandlers::default_handler_server + /// [`.handler()`]: ErrorHandlers::handler pub fn default_handler(self, handler: F) -> Self where F: Fn(ServiceResponse) -> Result> + 'static, { let handler = Rc::new(handler); + let handler2 = Rc::clone(&handler); Self { - default_server: Some(handler.clone()), + default_server: Some(handler2), default_client: Some(handler), ..self } @@ -288,7 +292,7 @@ where type Future = LocalBoxFuture<'static, Result>; fn new_transform(&self, service: S) -> Self::Future { - let handlers = self.handlers.clone(); + let handlers = Rc::clone(&self.handlers); let default_client = self.default_client.clone(); let default_server = self.default_server.clone(); Box::pin(async move { @@ -323,7 +327,7 @@ where actix_service::forward_ready!(service); fn call(&self, req: ServiceRequest) -> Self::Future { - let handlers = self.handlers.clone(); + let handlers = Rc::clone(&self.handlers); let default_client = self.default_client.clone(); let default_server = self.default_server.clone(); let fut = self.service.call(req); @@ -407,10 +411,7 @@ mod tests { use super::*; use crate::{ body, - http::{ - header::{HeaderValue, CONTENT_TYPE}, - StatusCode, - }, + http::header::{HeaderValue, CONTENT_TYPE}, test::{self, TestRequest}, }; diff --git a/actix-web/src/middleware/from_fn.rs b/actix-web/src/middleware/from_fn.rs new file mode 100644 index 000000000..608833319 --- /dev/null +++ b/actix-web/src/middleware/from_fn.rs @@ -0,0 +1,349 @@ +use std::{future::Future, marker::PhantomData, rc::Rc}; + +use actix_service::boxed::{self, BoxFuture, RcService}; +use actix_utils::future::{ready, Ready}; +use futures_core::future::LocalBoxFuture; + +use crate::{ + body::MessageBody, + dev::{forward_ready, Service, ServiceRequest, ServiceResponse, Transform}, + Error, FromRequest, +}; + +/// Wraps an async function to be used as a middleware. +/// +/// # Examples +/// +/// The wrapped function should have the following form: +/// +/// ``` +/// # use actix_web::{ +/// # App, Error, +/// # body::MessageBody, +/// # dev::{ServiceRequest, ServiceResponse, Service as _}, +/// # }; +/// use actix_web::middleware::{self, Next}; +/// +/// async fn my_mw( +/// req: ServiceRequest, +/// next: Next, +/// ) -> Result, Error> { +/// // pre-processing +/// next.call(req).await +/// // post-processing +/// } +/// # App::new().wrap(middleware::from_fn(my_mw)); +/// ``` +/// +/// Then use in an app builder like this: +/// +/// ``` +/// use actix_web::{ +/// App, Error, +/// dev::{ServiceRequest, ServiceResponse, Service as _}, +/// }; +/// use actix_web::middleware::from_fn; +/// # use actix_web::middleware::Next; +/// # async fn my_mw(req: ServiceRequest, next: Next) -> Result, Error> { +/// # next.call(req).await +/// # } +/// +/// App::new() +/// .wrap(from_fn(my_mw)) +/// # ; +/// ``` +/// +/// It is also possible to write a middleware that automatically uses extractors, similar to request +/// handlers, by declaring them as the first parameters. 
As usual, **take care with extractors that +/// consume the body stream**, since handlers will no longer be able to read it again without +/// putting the body "back" into the request object within your middleware. +/// +/// ``` +/// # use std::collections::HashMap; +/// # use actix_web::{ +/// # App, Error, +/// # body::MessageBody, +/// # dev::{ServiceRequest, ServiceResponse}, +/// # http::header::{Accept, Date}, +/// # web::{Header, Query}, +/// # }; +/// use actix_web::middleware::Next; +/// +/// async fn my_extracting_mw( +/// accept: Header, +/// query: Query>, +/// req: ServiceRequest, +/// next: Next, +/// ) -> Result, Error> { +/// // pre-processing +/// next.call(req).await +/// // post-processing +/// } +/// # App::new().wrap(actix_web::middleware::from_fn(my_extracting_mw)); +pub fn from_fn(mw_fn: F) -> MiddlewareFn { + MiddlewareFn { + mw_fn: Rc::new(mw_fn), + _phantom: PhantomData, + } +} + +/// Middleware transform for [`from_fn`]. +#[allow(missing_debug_implementations)] +pub struct MiddlewareFn { + mw_fn: Rc, + _phantom: PhantomData, +} + +impl Transform for MiddlewareFn +where + S: Service, Error = Error> + 'static, + F: Fn(ServiceRequest, Next) -> Fut + 'static, + Fut: Future, Error>>, + B2: MessageBody, +{ + type Response = ServiceResponse; + type Error = Error; + type Transform = MiddlewareFnService; + type InitError = (); + type Future = Ready>; + + fn new_transform(&self, service: S) -> Self::Future { + ready(Ok(MiddlewareFnService { + service: boxed::rc_service(service), + mw_fn: Rc::clone(&self.mw_fn), + _phantom: PhantomData, + })) + } +} + +/// Middleware service for [`from_fn`]. +#[allow(missing_debug_implementations)] +pub struct MiddlewareFnService { + service: RcService, Error>, + mw_fn: Rc, + _phantom: PhantomData<(B, Es)>, +} + +impl Service for MiddlewareFnService +where + F: Fn(ServiceRequest, Next) -> Fut, + Fut: Future, Error>>, + B2: MessageBody, +{ + type Response = ServiceResponse; + type Error = Error; + type Future = Fut; + + forward_ready!(service); + + fn call(&self, req: ServiceRequest) -> Self::Future { + (self.mw_fn)( + req, + Next:: { + service: Rc::clone(&self.service), + }, + ) + } +} + +macro_rules! 
impl_middleware_fn_service { + ($($ext_type:ident),*) => { + impl Transform for MiddlewareFn + where + S: Service, Error = Error> + 'static, + F: Fn($($ext_type),*, ServiceRequest, Next) -> Fut + 'static, + $($ext_type: FromRequest + 'static,)* + Fut: Future, Error>> + 'static, + B: MessageBody + 'static, + B2: MessageBody + 'static, + { + type Response = ServiceResponse; + type Error = Error; + type Transform = MiddlewareFnService; + type InitError = (); + type Future = Ready>; + + fn new_transform(&self, service: S) -> Self::Future { + ready(Ok(MiddlewareFnService { + service: boxed::rc_service(service), + mw_fn: Rc::clone(&self.mw_fn), + _phantom: PhantomData, + })) + } + } + + impl Service + for MiddlewareFnService + where + F: Fn( + $($ext_type),*, + ServiceRequest, + Next + ) -> Fut + 'static, + $($ext_type: FromRequest + 'static,)* + Fut: Future, Error>> + 'static, + B2: MessageBody + 'static, + { + type Response = ServiceResponse; + type Error = Error; + type Future = LocalBoxFuture<'static, Result>; + + forward_ready!(service); + + #[allow(nonstandard_style)] + fn call(&self, mut req: ServiceRequest) -> Self::Future { + let mw_fn = Rc::clone(&self.mw_fn); + let service = Rc::clone(&self.service); + + Box::pin(async move { + let ($($ext_type,)*) = req.extract::<($($ext_type,)*)>().await?; + + (mw_fn)($($ext_type),*, req, Next:: { service }).await + }) + } + } + }; +} + +impl_middleware_fn_service!(E1); +impl_middleware_fn_service!(E1, E2); +impl_middleware_fn_service!(E1, E2, E3); +impl_middleware_fn_service!(E1, E2, E3, E4); +impl_middleware_fn_service!(E1, E2, E3, E4, E5); +impl_middleware_fn_service!(E1, E2, E3, E4, E5, E6); +impl_middleware_fn_service!(E1, E2, E3, E4, E5, E6, E7); +impl_middleware_fn_service!(E1, E2, E3, E4, E5, E6, E7, E8); +impl_middleware_fn_service!(E1, E2, E3, E4, E5, E6, E7, E8, E9); + +/// Wraps the "next" service in the middleware chain. +#[allow(missing_debug_implementations)] +pub struct Next { + service: RcService, Error>, +} + +impl Next { + /// Equivalent to `Service::call(self, req)`. 
+ pub fn call(&self, req: ServiceRequest) -> >::Future { + Service::call(self, req) + } +} + +impl Service for Next { + type Response = ServiceResponse; + type Error = Error; + type Future = BoxFuture>; + + forward_ready!(service); + + fn call(&self, req: ServiceRequest) -> Self::Future { + self.service.call(req) + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::{ + http::header::{self, HeaderValue}, + middleware::{Compat, Logger}, + test, web, App, HttpResponse, + }; + + async fn noop(req: ServiceRequest, next: Next) -> Result, Error> { + next.call(req).await + } + + async fn add_res_header( + req: ServiceRequest, + next: Next, + ) -> Result, Error> { + let mut res = next.call(req).await?; + res.headers_mut() + .insert(header::WARNING, HeaderValue::from_static("42")); + Ok(res) + } + + async fn mutate_body_type( + req: ServiceRequest, + next: Next, + ) -> Result, Error> { + let res = next.call(req).await?; + Ok(res.map_into_left_body::<()>()) + } + + struct MyMw(bool); + + impl MyMw { + async fn mw_cb( + &self, + req: ServiceRequest, + next: Next, + ) -> Result, Error> { + let mut res = match self.0 { + true => req.into_response("short-circuited").map_into_right_body(), + false => next.call(req).await?.map_into_left_body(), + }; + res.headers_mut() + .insert(header::WARNING, HeaderValue::from_static("42")); + Ok(res) + } + + pub fn into_middleware( + self, + ) -> impl Transform< + S, + ServiceRequest, + Response = ServiceResponse, + Error = Error, + InitError = (), + > + where + S: Service, Error = Error> + 'static, + B: MessageBody + 'static, + { + let this = Rc::new(self); + from_fn(move |req, next| { + let this = Rc::clone(&this); + async move { Self::mw_cb(&this, req, next).await } + }) + } + } + + #[actix_rt::test] + async fn compat_compat() { + let _ = App::new().wrap(Compat::new(from_fn(noop))); + let _ = App::new().wrap(Compat::new(from_fn(mutate_body_type))); + } + + #[actix_rt::test] + async fn permits_different_in_and_out_body_types() { + let app = test::init_service( + App::new() + .wrap(from_fn(mutate_body_type)) + .wrap(from_fn(add_res_header)) + .wrap(Logger::default()) + .wrap(from_fn(noop)) + .default_service(web::to(HttpResponse::NotFound)), + ) + .await; + + let req = test::TestRequest::default().to_request(); + let res = test::call_service(&app, req).await; + assert!(res.headers().contains_key(header::WARNING)); + } + + #[actix_rt::test] + async fn closure_capture_and_return_from_fn() { + let app = test::init_service( + App::new() + .wrap(Logger::default()) + .wrap(MyMw(true).into_middleware()) + .wrap(Logger::default()), + ) + .await; + + let req = test::TestRequest::default().to_request(); + let res = test::call_service(&app, req).await; + assert!(res.headers().contains_key(header::WARNING)); + } +} diff --git a/actix-web/src/middleware/noop.rs b/actix-web/src/middleware/identity.rs similarity index 57% rename from actix-web/src/middleware/noop.rs rename to actix-web/src/middleware/identity.rs index ae7da1d81..de374a57b 100644 --- a/actix-web/src/middleware/noop.rs +++ b/actix-web/src/middleware/identity.rs @@ -2,35 +2,39 @@ use actix_utils::future::{ready, Ready}; -use crate::dev::{Service, Transform}; +use crate::dev::{forward_ready, Service, Transform}; /// A no-op middleware that passes through request and response untouched. 
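Since `Identity` is now public, it can be dropped in wherever a placeholder middleware is needed; a minimal sketch:

```rust
use actix_web::{middleware::Identity, web, App, HttpResponse};

fn build_app() {
    // Identity forwards every request untouched, so wrapping it has no
    // observable effect; useful as the "disabled" arm of conditional or
    // generic middleware stacks (see the Condition tests in this patch).
    let _app = App::new()
        .wrap(Identity)
        .route("/", web::get().to(HttpResponse::Ok));
}
```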
-pub(crate) struct Noop; +#[derive(Debug, Clone, Default)] +#[non_exhaustive] +pub struct Identity; -impl, Req> Transform for Noop { +impl, Req> Transform for Identity { type Response = S::Response; type Error = S::Error; - type Transform = NoopService; + type Transform = IdentityMiddleware; type InitError = (); type Future = Ready>; + #[inline] fn new_transform(&self, service: S) -> Self::Future { - ready(Ok(NoopService { service })) + ready(Ok(IdentityMiddleware { service })) } } #[doc(hidden)] -pub(crate) struct NoopService { +pub struct IdentityMiddleware { service: S, } -impl, Req> Service for NoopService { +impl, Req> Service for IdentityMiddleware { type Response = S::Response; type Error = S::Error; type Future = S::Future; - crate::dev::forward_ready!(service); + forward_ready!(service); + #[inline] fn call(&self, req: Req) -> Self::Future { self.service.call(req) } diff --git a/actix-web/src/middleware/logger.rs b/actix-web/src/middleware/logger.rs index ce2caacd9..21986baae 100644 --- a/actix-web/src/middleware/logger.rs +++ b/actix-web/src/middleware/logger.rs @@ -18,7 +18,10 @@ use bytes::Bytes; use futures_core::ready; use log::{debug, warn}; use pin_project_lite::pin_project; -use regex::{Regex, RegexSet}; +#[cfg(feature = "unicode")] +use regex::Regex; +#[cfg(not(feature = "unicode"))] +use regex_lite::Regex; use time::{format_description::well_known::Rfc3339, OffsetDateTime}; use crate::{ @@ -87,7 +90,7 @@ pub struct Logger(Rc); struct Inner { format: Format, exclude: HashSet, - exclude_regex: RegexSet, + exclude_regex: Vec, log_target: Cow<'static, str>, } @@ -97,7 +100,7 @@ impl Logger { Logger(Rc::new(Inner { format: Format::new(format), exclude: HashSet::new(), - exclude_regex: RegexSet::empty(), + exclude_regex: Vec::new(), log_target: Cow::Borrowed(module_path!()), })) } @@ -114,10 +117,7 @@ impl Logger { /// Ignore and do not log access info for paths that match regex. 
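A usage sketch for the exclusion API (paths are illustrative); note that each call now compiles its own `Regex` instead of rebuilding a `RegexSet`:

```rust
use actix_web::{middleware::Logger, App};

fn build_app() {
    // Requests to /ping and anything under /metrics are not access-logged.
    let _app = App::new().wrap(
        Logger::default()
            .exclude("/ping")
            .exclude_regex("^/metrics"),
    );
}
```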
pub fn exclude_regex>(mut self, path: T) -> Self { let inner = Rc::get_mut(&mut self.0).unwrap(); - let mut patterns = inner.exclude_regex.patterns().to_vec(); - patterns.push(path.into()); - let regex_set = RegexSet::new(patterns).unwrap(); - inner.exclude_regex = regex_set; + inner.exclude_regex.push(Regex::new(&path.into()).unwrap()); self } @@ -240,7 +240,7 @@ impl Default for Logger { Logger(Rc::new(Inner { format: Format::default(), exclude: HashSet::new(), - exclude_regex: RegexSet::empty(), + exclude_regex: Vec::new(), log_target: Cow::Borrowed(module_path!()), })) } @@ -276,7 +276,7 @@ where ready(Ok(LoggerMiddleware { service, - inner: self.0.clone(), + inner: Rc::clone(&self.0), })) } } @@ -300,7 +300,11 @@ where fn call(&self, req: ServiceRequest) -> Self::Future { let excluded = self.inner.exclude.contains(req.path()) - || self.inner.exclude_regex.is_match(req.path()); + || self + .inner + .exclude_regex + .iter() + .any(|r| r.is_match(req.path())); if excluded { LoggerResponse { @@ -618,11 +622,7 @@ impl FormatText { FormatText::ResponseHeader(ref name) => { let s = if let Some(val) = res.headers().get(name) { - if let Ok(s) = val.to_str() { - s - } else { - "-" - } + val.to_str().unwrap_or("-") } else { "-" }; @@ -666,11 +666,7 @@ impl FormatText { FormatText::RequestTime => *self = FormatText::Str(now.format(&Rfc3339).unwrap()), FormatText::RequestHeader(ref name) => { let s = if let Some(val) = req.headers().get(name) { - if let Ok(s) = val.to_str() { - s - } else { - "-" - } + val.to_str().unwrap_or("-") } else { "-" }; @@ -716,7 +712,7 @@ impl<'a> fmt::Display for FormatDisplay<'a> { #[cfg(test)] mod tests { - use actix_service::{IntoService, Service, Transform}; + use actix_service::IntoService; use actix_utils::future::ok; use super::*; diff --git a/actix-web/src/middleware/mod.rs b/actix-web/src/middleware/mod.rs index ed61556a3..4b5b3e896 100644 --- a/actix-web/src/middleware/mod.rs +++ b/actix-web/src/middleware/mod.rs @@ -15,10 +15,47 @@ //! - Access external services (e.g., [sessions](https://docs.rs/actix-session), etc.) //! //! Middleware is registered for each [`App`], [`Scope`](crate::Scope), or -//! [`Resource`](crate::Resource) and executed in opposite order as registration. In general, a -//! middleware is a pair of types that implements the [`Service`] trait and [`Transform`] trait, -//! respectively. The [`new_transform`] and [`call`] methods must return a [`Future`], though it -//! can often be [an immediately-ready one](actix_utils::future::Ready). +//! [`Resource`](crate::Resource) and executed in opposite order as registration. +//! +//! # Simple Middleware +//! +//! In many cases, you can model your middleware as an async function via the [`from_fn()`] helper +//! that provides a natural interface for implementing your desired behaviors. +//! +//! ``` +//! # use actix_web::{ +//! # App, Error, +//! # body::MessageBody, +//! # dev::{ServiceRequest, ServiceResponse, Service as _}, +//! # }; +//! use actix_web::middleware::{self, Next}; +//! +//! async fn my_mw( +//! req: ServiceRequest, +//! next: Next, +//! ) -> Result, Error> { +//! // pre-processing +//! +//! // invoke the wrapped middleware or service +//! let res = next.call(req).await?; +//! +//! // post-processing +//! +//! Ok(res) +//! } +//! +//! App::new() +//! .wrap(middleware::from_fn(my_mw)); +//! ``` +//! +//! ## Complex Middleware +//! +//! In the more general ase, a middleware is a pair of types that implements the [`Service`] trait +//! and [`Transform`] trait, respectively. 
The [`new_transform`] and [`call`] methods must return a +//! [`Future`], though it can often be [an immediately-ready one](actix_utils::future::Ready). +//! +//! All the built-in middleware use this pattern with pairs of builder (`Transform`) + +//! implementation (`Service`) types. //! //! # Ordering //! @@ -33,13 +70,13 @@ //! //! # fn main() { //! # // These aren't snake_case, because they are supposed to be unit structs. -//! # let MiddlewareA = middleware::Compress::default(); -//! # let MiddlewareB = middleware::Compress::default(); -//! # let MiddlewareC = middleware::Compress::default(); +//! # type MiddlewareA = middleware::Compress; +//! # type MiddlewareB = middleware::Compress; +//! # type MiddlewareC = middleware::Compress; //! let app = App::new() -//! .wrap(MiddlewareA) -//! .wrap(MiddlewareB) -//! .wrap(MiddlewareC) +//! .wrap(MiddlewareA::default()) +//! .wrap(MiddlewareB::default()) +//! .wrap(MiddlewareC::default()) //! .service(service); //! # } //! ``` @@ -67,7 +104,7 @@ //! Response //! ``` //! The request _first_ gets processed by the middleware specified _last_ - `MiddlewareC`. It passes -//! the request (modified a modified one) to the next middleware - `MiddlewareB` - _or_ directly +//! the request (possibly a modified one) to the next middleware - `MiddlewareB` - _or_ directly //! responds to the request (e.g. when the request was invalid or an error occurred). `MiddlewareB` //! processes the request as well and passes it to `MiddlewareA`, which then passes it to the //! [`Service`]. In the [`Service`], the extractors will run first. They don't pass the request on, @@ -196,18 +233,6 @@ //! # } //! ``` //! -//! # Simpler Middleware -//! -//! In many cases, you _can_ actually use an async function via a helper that will provide a more -//! natural flow for your behavior. -//! -//! The experimental `actix_web_lab` crate provides a [`from_fn`][lab_from_fn] utility which allows -//! an async fn to be wrapped and used in the same way as other middleware. See the -//! [`from_fn`][lab_from_fn] docs for more info and examples of it's use. -//! -//! While [`from_fn`][lab_from_fn] is experimental currently, it's likely this helper will graduate -//! to Actix Web in some form, so feedback is appreciated. -//! //! [`Future`]: std::future::Future //! [`App`]: crate::App //! [`FromRequest`]: crate::FromRequest @@ -215,34 +240,32 @@ //! [`Transform`]: crate::dev::Transform //! [`call`]: crate::dev::Service::call() //! [`new_transform`]: crate::dev::Transform::new_transform() -//! [lab_from_fn]: https://docs.rs/actix-web-lab/latest/actix_web_lab/middleware/fn.from_fn.html +//! 
[`from_fn`]: crate mod compat; +#[cfg(feature = "__compress")] +mod compress; mod condition; mod default_headers; mod err_handlers; +mod from_fn; +mod identity; mod logger; -#[cfg(test)] -mod noop; mod normalize; -#[cfg(test)] -pub(crate) use self::noop::Noop; +#[cfg(feature = "__compress")] +pub use self::compress::Compress; pub use self::{ compat::Compat, condition::Condition, default_headers::DefaultHeaders, err_handlers::{ErrorHandlerResponse, ErrorHandlers}, + from_fn::{from_fn, Next}, + identity::Identity, logger::Logger, normalize::{NormalizePath, TrailingSlash}, }; -#[cfg(feature = "__compress")] -mod compress; - -#[cfg(feature = "__compress")] -pub use self::compress::Compress; - #[cfg(test)] mod tests { use super::*; diff --git a/actix-web/src/middleware/normalize.rs b/actix-web/src/middleware/normalize.rs index afcc0faac..482107ecb 100644 --- a/actix-web/src/middleware/normalize.rs +++ b/actix-web/src/middleware/normalize.rs @@ -4,7 +4,10 @@ use actix_http::uri::{PathAndQuery, Uri}; use actix_service::{Service, Transform}; use actix_utils::future::{ready, Ready}; use bytes::Bytes; +#[cfg(feature = "unicode")] use regex::Regex; +#[cfg(not(feature = "unicode"))] +use regex_lite::Regex; use crate::{ service::{ServiceRequest, ServiceResponse}, @@ -205,7 +208,6 @@ mod tests { use super::*; use crate::{ - dev::ServiceRequest, guard::fn_guard, test::{call_service, init_service, TestRequest}, web, App, HttpResponse, diff --git a/actix-web/src/redirect.rs b/actix-web/src/redirect.rs index 5ce960aa4..bd29a1403 100644 --- a/actix-web/src/redirect.rs +++ b/actix-web/src/redirect.rs @@ -182,7 +182,7 @@ impl Responder for Redirect { #[cfg(test)] mod tests { use super::*; - use crate::{dev::Service, http::StatusCode, test, App}; + use crate::{dev::Service, test, App}; #[actix_rt::test] async fn absolute_redirects() { diff --git a/actix-web/src/request.rs b/actix-web/src/request.rs index ece36a388..47b3e3d88 100644 --- a/actix-web/src/request.rs +++ b/actix-web/src/request.rs @@ -91,6 +91,35 @@ impl HttpRequest { &self.head().uri } + /// Returns request's original full URL. + /// + /// Reconstructed URL is best-effort, using [`connection_info`](HttpRequest::connection_info()) + /// to get forwarded scheme & host. + /// + /// ``` + /// use actix_web::test::TestRequest; + /// let req = TestRequest::with_uri("http://10.1.2.3:8443/api?id=4&name=foo") + /// .insert_header(("host", "example.com")) + /// .to_http_request(); + /// + /// assert_eq!( + /// req.full_url().as_str(), + /// "http://example.com/api?id=4&name=foo", + /// ); + /// ``` + pub fn full_url(&self) -> url::Url { + let info = self.connection_info(); + let scheme = info.scheme(); + let host = info.host(); + let path_and_query = self + .uri() + .path_and_query() + .map(|paq| paq.as_str()) + .unwrap_or("/"); + + url::Url::parse(&format!("{scheme}://{host}{path_and_query}")).unwrap() + } + /// Read the Request method. 
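Returning to the new `full_url()` method above: because it is reconstructed from `ConnectionInfo`, a forwarded scheme and host are reflected in the result. A small sketch (header values are illustrative):

```rust
use actix_web::{http::header, test::TestRequest};

fn full_url_behind_proxy() {
    let req = TestRequest::with_uri("/api?id=4")
        .insert_header((header::FORWARDED, r#"proto=https;host="example.com""#))
        .to_http_request();

    // Scheme and host come from the Forwarded header via ConnectionInfo.
    assert_eq!(req.full_url().as_str(), "https://example.com/api?id=4");
}
```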
#[inline] pub fn method(&self) -> &Method { @@ -523,7 +552,7 @@ mod tests { use super::*; use crate::{ - dev::{ResourceDef, ResourceMap, Service}, + dev::{ResourceDef, Service}, http::{header, StatusCode}, test::{self, call_service, init_service, read_body, TestRequest}, web, App, HttpResponse, @@ -963,4 +992,27 @@ mod tests { assert!(format!("{:?}", req).contains(location_header)); } + + #[test] + fn check_full_url() { + let req = TestRequest::with_uri("/api?id=4&name=foo").to_http_request(); + assert_eq!( + req.full_url().as_str(), + "http://localhost:8080/api?id=4&name=foo", + ); + + let req = TestRequest::with_uri("https://example.com/api?id=4&name=foo").to_http_request(); + assert_eq!( + req.full_url().as_str(), + "https://example.com/api?id=4&name=foo", + ); + + let req = TestRequest::with_uri("http://10.1.2.3:8443/api?id=4&name=foo") + .insert_header(("host", "example.com")) + .to_http_request(); + assert_eq!( + req.full_url().as_str(), + "http://example.com/api?id=4&name=foo", + ); + } } diff --git a/actix-web/src/resource.rs b/actix-web/src/resource.rs index 95185b80a..d89438edb 100644 --- a/actix-web/src/resource.rs +++ b/actix-web/src/resource.rs @@ -62,14 +62,14 @@ pub struct Resource { impl Resource { /// Constructs new resource that matches a `path` pattern. pub fn new(path: T) -> Resource { - let fref = Rc::new(RefCell::new(None)); + let factory_ref = Rc::new(RefCell::new(None)); Resource { routes: Vec::new(), rdef: path.patterns(), name: None, - endpoint: ResourceEndpoint::new(fref.clone()), - factory_ref: fref, + endpoint: ResourceEndpoint::new(Rc::clone(&factory_ref)), + factory_ref, guards: Vec::new(), app_data: None, default: boxed::factory(fn_service(|req: ServiceRequest| async { @@ -540,20 +540,14 @@ mod tests { use std::time::Duration; use actix_rt::time::sleep; - use actix_service::Service; use actix_utils::future::ok; use super::*; use crate::{ - guard, - http::{ - header::{self, HeaderValue}, - Method, StatusCode, - }, + http::{header::HeaderValue, Method, StatusCode}, middleware::DefaultHeaders, - service::{ServiceRequest, ServiceResponse}, test::{call_service, init_service, TestRequest}, - web, App, Error, HttpMessage, HttpResponse, + App, HttpMessage, }; #[test] @@ -777,7 +771,7 @@ mod tests { data3: web::Data| { assert_eq!(**data1, 10); assert_eq!(**data2, '*'); - let error = std::f64::EPSILON; + let error = f64::EPSILON; assert!((**data3 - 1.0).abs() < error); HttpResponse::Ok() }, diff --git a/actix-web/src/response/builder.rs b/actix-web/src/response/builder.rs index 28a0adffd..c23de8e36 100644 --- a/actix-web/src/response/builder.rs +++ b/actix-web/src/response/builder.rs @@ -408,10 +408,7 @@ mod tests { use super::*; use crate::{ body, - http::{ - header::{self, HeaderValue, CONTENT_TYPE}, - StatusCode, - }, + http::header::{HeaderValue, CONTENT_TYPE}, test::assert_body_eq, }; @@ -466,7 +463,7 @@ mod tests { // content type override let res = HttpResponse::Ok() .insert_header((CONTENT_TYPE, "text/json")) - .json(&vec!["v1", "v2", "v3"]); + .json(["v1", "v2", "v3"]); let ct = res.headers().get(CONTENT_TYPE).unwrap(); assert_eq!(ct, HeaderValue::from_static("text/json")); assert_body_eq!(res, br#"["v1","v2","v3"]"#); diff --git a/actix-web/src/response/customize_responder.rs b/actix-web/src/response/customize_responder.rs index aad0039e0..6a43ac5e6 100644 --- a/actix-web/src/response/customize_responder.rs +++ b/actix-web/src/response/customize_responder.rs @@ -7,7 +7,7 @@ use actix_http::{ use crate::{HttpRequest, HttpResponse, Responder}; -/// Allows 
overriding status code and headers for a [`Responder`]. +/// Allows overriding status code and headers (including cookies) for a [`Responder`]. /// /// Created by calling the [`customize`](Responder::customize) method on a [`Responder`] type. pub struct CustomizeResponder { @@ -137,6 +137,29 @@ impl CustomizeResponder { Some(&mut self.inner) } } + + /// Appends a `cookie` to the final response. + /// + /// # Errors + /// + /// Final response will be an error if `cookie` cannot be converted into a valid header value. + #[cfg(feature = "cookies")] + pub fn add_cookie(mut self, cookie: &crate::cookie::Cookie<'_>) -> Self { + use actix_http::header::{TryIntoHeaderValue as _, SET_COOKIE}; + + if let Some(inner) = self.inner() { + match cookie.to_string().try_into_value() { + Ok(val) => { + inner.append_headers.append(SET_COOKIE, val); + } + Err(err) => { + self.error = Some(err.into()); + } + } + } + + self + } } impl Responder for CustomizeResponder @@ -175,10 +198,8 @@ mod tests { use super::*; use crate::{ - http::{ - header::{HeaderValue, CONTENT_TYPE}, - StatusCode, - }, + cookie::Cookie, + http::header::{HeaderValue, CONTENT_TYPE}, test::TestRequest, }; @@ -212,6 +233,22 @@ mod tests { to_bytes(res.into_body()).await.unwrap(), Bytes::from_static(b"test"), ); + + let res = "test" + .to_string() + .customize() + .add_cookie(&Cookie::new("name", "value")) + .respond_to(&req); + + assert!(res.status().is_success()); + assert_eq!( + res.cookies().collect::>>(), + vec![Cookie::new("name", "value")], + ); + assert_eq!( + to_bytes(res.into_body()).await.unwrap(), + Bytes::from_static(b"test"), + ); } #[actix_rt::test] diff --git a/actix-web/src/response/responder.rs b/actix-web/src/response/responder.rs index 7d0b0e585..90d8f6e52 100644 --- a/actix-web/src/response/responder.rs +++ b/actix-web/src/response/responder.rs @@ -188,15 +188,11 @@ impl_into_string_responder!(Cow<'_, str>); pub(crate) mod tests { use actix_http::body::to_bytes; use actix_service::Service; - use bytes::{Bytes, BytesMut}; use super::*; use crate::{ error, - http::{ - header::{HeaderValue, CONTENT_TYPE}, - StatusCode, - }, + http::header::{HeaderValue, CONTENT_TYPE}, test::{assert_body_eq, init_service, TestRequest}, web, App, }; diff --git a/actix-web/src/response/response.rs b/actix-web/src/response/response.rs index fbd87e10c..e16dc0cd9 100644 --- a/actix-web/src/response/response.rs +++ b/actix-web/src/response/response.rs @@ -399,7 +399,7 @@ mod tests { use static_assertions::assert_impl_all; use super::*; - use crate::http::header::{HeaderValue, COOKIE}; + use crate::http::header::COOKIE; assert_impl_all!(HttpResponse: Responder); assert_impl_all!(HttpResponse: Responder); diff --git a/actix-web/src/route.rs b/actix-web/src/route.rs index a46c1fdd4..e05e6be52 100644 --- a/actix-web/src/route.rs +++ b/actix-web/src/route.rs @@ -77,7 +77,7 @@ impl ServiceFactory for Route { fn new_service(&self, _: ()) -> Self::Future { let fut = self.service.new_service(()); - let guards = self.guards.clone(); + let guards = Rc::clone(&self.guards); Box::pin(async move { let service = fut.await?; @@ -92,6 +92,7 @@ pub struct RouteService { } impl RouteService { + // TODO(breaking): remove pass by ref mut #[allow(clippy::needless_pass_by_ref_mut)] pub fn check(&self, req: &mut ServiceRequest) -> bool { let guard_ctx = req.guard_ctx(); diff --git a/actix-web/src/scope.rs b/actix-web/src/scope.rs index e7c4e047a..81f3615b0 100644 --- a/actix-web/src/scope.rs +++ b/actix-web/src/scope.rs @@ -213,7 +213,6 @@ where /// /// * *Resource* is an 
entry in resource table which corresponds to requested URL. /// * *Scope* is a set of resources with common root path. - /// * "StaticFiles" is a service for static files support /// /// ``` /// use actix_web::{web, App, HttpRequest}; @@ -470,8 +469,9 @@ impl ServiceFactory for ScopeFactory { let guards = guards.borrow_mut().take().unwrap_or_default(); let factory_fut = factory.new_service(()); async move { - let service = factory_fut.await?; - Ok((path, guards, service)) + factory_fut + .await + .map(move |service| (path, guards, service)) } })); @@ -547,7 +547,6 @@ impl ServiceFactory for ScopeEndpoint { #[cfg(test)] mod tests { - use actix_service::Service; use actix_utils::future::ok; use bytes::Bytes; @@ -559,7 +558,6 @@ mod tests { Method, StatusCode, }, middleware::DefaultHeaders, - service::{ServiceRequest, ServiceResponse}, test::{assert_body_eq, call_service, init_service, read_body, TestRequest}, web, App, HttpMessage, HttpRequest, HttpResponse, }; diff --git a/actix-web/src/server.rs b/actix-web/src/server.rs index 2cc00cb9f..d8519fb9e 100644 --- a/actix-web/src/server.rs +++ b/actix-web/src/server.rs @@ -7,7 +7,7 @@ use std::{ time::Duration, }; -#[cfg(any(feature = "openssl", feature = "rustls-0_20", feature = "rustls-0_21"))] +#[cfg(feature = "__tls")] use actix_http::TlsAcceptorConfig; use actix_http::{body::MessageBody, Extensions, HttpService, KeepAlive, Request, Response}; use actix_server::{Server, ServerBuilder}; @@ -184,7 +184,7 @@ where /// By default max connections is set to a 256. #[allow(unused_variables)] pub fn max_connection_rate(self, num: usize) -> Self { - #[cfg(any(feature = "rustls-0_20", feature = "rustls-0_21", feature = "openssl"))] + #[cfg(feature = "__tls")] actix_tls::accept::max_concurrent_tls_connect(num); self } @@ -237,7 +237,7 @@ where /// time, the connection is closed. /// /// By default, the handshake timeout is 3 seconds. - #[cfg(any(feature = "openssl", feature = "rustls-0_20", feature = "rustls-0_21"))] + #[cfg(feature = "__tls")] pub fn tls_handshake_timeout(self, dur: Duration) -> Self { self.config .lock() @@ -265,6 +265,10 @@ where /// Rustls v0.20. /// - `actix_tls::accept::rustls_0_21::TlsStream` when using /// Rustls v0.21. + /// - `actix_tls::accept::rustls_0_22::TlsStream` when using + /// Rustls v0.22. + /// - `actix_tls::accept::rustls_0_23::TlsStream` when using + /// Rustls v0.23. /// - `actix_web::rt::net::TcpStream` when no encryption is used. /// /// See the `on_connect` example for additional details. @@ -442,6 +446,44 @@ where Ok(self) } + /// Resolves socket address(es) and binds server to created listener(s) for TLS connections + /// using Rustls v0.22. + /// + /// See [`bind()`](Self::bind()) for more details on `addrs` argument. + /// + /// ALPN protocols "h2" and "http/1.1" are added to any configured ones. + #[cfg(feature = "rustls-0_22")] + pub fn bind_rustls_0_22( + mut self, + addrs: A, + config: actix_tls::accept::rustls_0_22::reexports::ServerConfig, + ) -> io::Result { + let sockets = bind_addrs(addrs, self.backlog)?; + for lst in sockets { + self = self.listen_rustls_0_22_inner(lst, config.clone())?; + } + Ok(self) + } + + /// Resolves socket address(es) and binds server to created listener(s) for TLS connections + /// using Rustls v0.23. + /// + /// See [`bind()`](Self::bind()) for more details on `addrs` argument. + /// + /// ALPN protocols "h2" and "http/1.1" are added to any configured ones. 
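A rough sketch of using the new binding (not part of the patch). It assumes a rustls v0.23 `ServerConfig` is available; the `tls_config()` test helper later in this patch shows one way to build such a value, and the parameter type below is the re-export this method actually accepts:

```rust
use actix_web::{web, App, HttpResponse, HttpServer};

async fn run_tls_server(
    tls_config: actix_tls::accept::rustls_0_23::reexports::ServerConfig,
) -> std::io::Result<()> {
    HttpServer::new(|| App::new().route("/", web::get().to(HttpResponse::Ok)))
        // "h2" and "http/1.1" ALPN protocols are added automatically.
        .bind_rustls_0_23(("127.0.0.1", 8443), tls_config)?
        .run()
        .await
}
```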
+ #[cfg(feature = "rustls-0_23")] + pub fn bind_rustls_0_23( + mut self, + addrs: A, + config: actix_tls::accept::rustls_0_23::reexports::ServerConfig, + ) -> io::Result { + let sockets = bind_addrs(addrs, self.backlog)?; + for lst in sockets { + self = self.listen_rustls_0_23_inner(lst, config.clone())?; + } + Ok(self) + } + /// Resolves socket address(es) and binds server to created listener(s) for TLS connections /// using OpenSSL. /// @@ -468,7 +510,7 @@ where /// No changes are made to `lst`'s configuration. Ensure it is configured properly before /// passing ownership to `listen()`. pub fn listen(mut self, lst: net::TcpListener) -> io::Result { - let cfg = self.config.clone(); + let cfg = Arc::clone(&self.config); let factory = self.factory.clone(); let addr = lst.local_addr().unwrap(); @@ -512,7 +554,7 @@ where /// Binds to existing listener for accepting incoming plaintext HTTP/1.x or HTTP/2 connections. #[cfg(feature = "http2")] pub fn listen_auto_h2c(mut self, lst: net::TcpListener) -> io::Result { - let cfg = self.config.clone(); + let cfg = Arc::clone(&self.config); let factory = self.factory.clone(); let addr = lst.local_addr().unwrap(); @@ -571,7 +613,7 @@ where /// Binds to existing listener for accepting incoming TLS connection requests using Rustls /// v0.21. /// - /// See [`listen()`](Self::listen) for more details on the `lst` argument. + /// See [`listen()`](Self::listen()) for more details on the `lst` argument. /// /// ALPN protocols "h2" and "http/1.1" are added to any configured ones. #[cfg(feature = "rustls-0_21")] @@ -590,7 +632,7 @@ where config: actix_tls::accept::rustls_0_20::reexports::ServerConfig, ) -> io::Result { let factory = self.factory.clone(); - let cfg = self.config.clone(); + let cfg = Arc::clone(&self.config); let addr = lst.local_addr().unwrap(); self.sockets.push(Socket { addr, @@ -641,7 +683,7 @@ where config: actix_tls::accept::rustls_0_21::reexports::ServerConfig, ) -> io::Result { let factory = self.factory.clone(); - let cfg = self.config.clone(); + let cfg = Arc::clone(&self.config); let addr = lst.local_addr().unwrap(); self.sockets.push(Socket { addr, @@ -685,6 +727,138 @@ where Ok(self) } + /// Binds to existing listener for accepting incoming TLS connection requests using Rustls + /// v0.22. + /// + /// See [`listen()`](Self::listen()) for more details on the `lst` argument. + /// + /// ALPN protocols "h2" and "http/1.1" are added to any configured ones. 
+ #[cfg(feature = "rustls-0_22")] + pub fn listen_rustls_0_22( + self, + lst: net::TcpListener, + config: actix_tls::accept::rustls_0_22::reexports::ServerConfig, + ) -> io::Result { + self.listen_rustls_0_22_inner(lst, config) + } + + #[cfg(feature = "rustls-0_22")] + fn listen_rustls_0_22_inner( + mut self, + lst: net::TcpListener, + config: actix_tls::accept::rustls_0_22::reexports::ServerConfig, + ) -> io::Result { + let factory = self.factory.clone(); + let cfg = Arc::clone(&self.config); + let addr = lst.local_addr().unwrap(); + self.sockets.push(Socket { + addr, + scheme: "https", + }); + + let on_connect_fn = self.on_connect_fn.clone(); + + self.builder = + self.builder + .listen(format!("actix-web-service-{}", addr), lst, move || { + let c = cfg.lock().unwrap(); + let host = c.host.clone().unwrap_or_else(|| format!("{}", addr)); + + let svc = HttpService::build() + .keep_alive(c.keep_alive) + .client_request_timeout(c.client_request_timeout) + .client_disconnect_timeout(c.client_disconnect_timeout); + + let svc = if let Some(handler) = on_connect_fn.clone() { + svc.on_connect_ext(move |io: &_, ext: _| (handler)(io as &dyn Any, ext)) + } else { + svc + }; + + let fac = factory() + .into_factory() + .map_err(|err| err.into().error_response()); + + let acceptor_config = match c.tls_handshake_timeout { + Some(dur) => TlsAcceptorConfig::default().handshake_timeout(dur), + None => TlsAcceptorConfig::default(), + }; + + svc.finish(map_config(fac, move |_| { + AppConfig::new(true, host.clone(), addr) + })) + .rustls_0_22_with_config(config.clone(), acceptor_config) + })?; + + Ok(self) + } + + /// Binds to existing listener for accepting incoming TLS connection requests using Rustls + /// v0.23. + /// + /// See [`listen()`](Self::listen()) for more details on the `lst` argument. + /// + /// ALPN protocols "h2" and "http/1.1" are added to any configured ones. 
+ #[cfg(feature = "rustls-0_23")] + pub fn listen_rustls_0_23( + self, + lst: net::TcpListener, + config: actix_tls::accept::rustls_0_23::reexports::ServerConfig, + ) -> io::Result { + self.listen_rustls_0_23_inner(lst, config) + } + + #[cfg(feature = "rustls-0_23")] + fn listen_rustls_0_23_inner( + mut self, + lst: net::TcpListener, + config: actix_tls::accept::rustls_0_23::reexports::ServerConfig, + ) -> io::Result { + let factory = self.factory.clone(); + let cfg = Arc::clone(&self.config); + let addr = lst.local_addr().unwrap(); + self.sockets.push(Socket { + addr, + scheme: "https", + }); + + let on_connect_fn = self.on_connect_fn.clone(); + + self.builder = + self.builder + .listen(format!("actix-web-service-{}", addr), lst, move || { + let c = cfg.lock().unwrap(); + let host = c.host.clone().unwrap_or_else(|| format!("{}", addr)); + + let svc = HttpService::build() + .keep_alive(c.keep_alive) + .client_request_timeout(c.client_request_timeout) + .client_disconnect_timeout(c.client_disconnect_timeout); + + let svc = if let Some(handler) = on_connect_fn.clone() { + svc.on_connect_ext(move |io: &_, ext: _| (handler)(io as &dyn Any, ext)) + } else { + svc + }; + + let fac = factory() + .into_factory() + .map_err(|err| err.into().error_response()); + + let acceptor_config = match c.tls_handshake_timeout { + Some(dur) => TlsAcceptorConfig::default().handshake_timeout(dur), + None => TlsAcceptorConfig::default(), + }; + + svc.finish(map_config(fac, move |_| { + AppConfig::new(true, host.clone(), addr) + })) + .rustls_0_23_with_config(config.clone(), acceptor_config) + })?; + + Ok(self) + } + /// Binds to existing listener for accepting incoming TLS connection requests using OpenSSL. /// /// See [`listen()`](Self::listen) for more details on the `lst` argument. @@ -706,7 +880,7 @@ where acceptor: SslAcceptor, ) -> io::Result { let factory = self.factory.clone(); - let cfg = self.config.clone(); + let cfg = Arc::clone(&self.config); let addr = lst.local_addr().unwrap(); self.sockets.push(Socket { addr, @@ -763,7 +937,7 @@ where use actix_rt::net::UnixStream; use actix_service::{fn_service, ServiceFactoryExt as _}; - let cfg = self.config.clone(); + let cfg = Arc::clone(&self.config); let factory = self.factory.clone(); let socket_addr = net::SocketAddr::new(net::IpAddr::V4(net::Ipv4Addr::new(127, 0, 0, 1)), 8080); @@ -808,7 +982,7 @@ where use actix_rt::net::UnixStream; use actix_service::{fn_service, ServiceFactoryExt as _}; - let cfg = self.config.clone(); + let cfg = Arc::clone(&self.config); let factory = self.factory.clone(); let socket_addr = net::SocketAddr::new(net::IpAddr::V4(net::Ipv4Addr::new(127, 0, 0, 1)), 8080); diff --git a/actix-web/src/service.rs b/actix-web/src/service.rs index 0e17c9949..a1672eba2 100644 --- a/actix-web/src/service.rs +++ b/actix-web/src/service.rs @@ -221,12 +221,9 @@ impl ServiceRequest { /// Returns peer's socket address. /// - /// Peer address is the directly connected peer's socket address. If a proxy is used in front of - /// the Actix Web server, then it would be address of this proxy. + /// See [`HttpRequest::peer_addr`] for more details. /// - /// To get client connection information `ConnectionInfo` should be used. - /// - /// Will only return None when called in unit tests. 
+ /// [`HttpRequest::peer_addr`]: crate::HttpRequest::peer_addr #[inline] pub fn peer_addr(&self) -> Option { self.head().peer_addr @@ -703,7 +700,7 @@ mod tests { use crate::{ guard, http, test::{self, init_service, TestRequest}, - web, App, HttpResponse, + web, App, }; #[actix_rt::test] diff --git a/actix-web/src/test/test_request.rs b/actix-web/src/test/test_request.rs index 5491af0ac..f178d6f43 100644 --- a/actix-web/src/test/test_request.rs +++ b/actix-web/src/test/test_request.rs @@ -86,76 +86,77 @@ impl Default for TestRequest { #[allow(clippy::wrong_self_convention)] impl TestRequest { - /// Create TestRequest and set request uri - pub fn with_uri(path: &str) -> TestRequest { - TestRequest::default().uri(path) + /// Constructs test request and sets request URI. + pub fn with_uri(uri: &str) -> TestRequest { + TestRequest::default().uri(uri) } - /// Create TestRequest and set method to `Method::GET` + /// Constructs test request with GET method. pub fn get() -> TestRequest { TestRequest::default().method(Method::GET) } - /// Create TestRequest and set method to `Method::POST` + /// Constructs test request with POST method. pub fn post() -> TestRequest { TestRequest::default().method(Method::POST) } - /// Create TestRequest and set method to `Method::PUT` + /// Constructs test request with PUT method. pub fn put() -> TestRequest { TestRequest::default().method(Method::PUT) } - /// Create TestRequest and set method to `Method::PATCH` + /// Constructs test request with PATCH method. pub fn patch() -> TestRequest { TestRequest::default().method(Method::PATCH) } - /// Create TestRequest and set method to `Method::DELETE` + /// Constructs test request with DELETE method. pub fn delete() -> TestRequest { TestRequest::default().method(Method::DELETE) } - /// Set HTTP version of this request + /// Sets HTTP version of this request. pub fn version(mut self, ver: Version) -> Self { self.req.version(ver); self } - /// Set HTTP method of this request + /// Sets method of this request. pub fn method(mut self, meth: Method) -> Self { self.req.method(meth); self } - /// Set HTTP URI of this request + /// Sets URI of this request. pub fn uri(mut self, path: &str) -> Self { self.req.uri(path); self } - /// Insert a header, replacing any that were set with an equivalent field name. + /// Inserts a header, replacing any that were set with an equivalent field name. pub fn insert_header(mut self, header: impl TryIntoHeaderPair) -> Self { self.req.insert_header(header); self } - /// Append a header, keeping any that were set with an equivalent field name. + /// Appends a header, keeping any that were set with an equivalent field name. pub fn append_header(mut self, header: impl TryIntoHeaderPair) -> Self { self.req.append_header(header); self } - /// Set cookie for this request. + /// Sets cookie for this request. #[cfg(feature = "cookies")] pub fn cookie(mut self, cookie: Cookie<'_>) -> Self { self.cookies.add(cookie.into_owned()); self } - /// Set request path pattern parameter. + /// Sets request path pattern parameter. /// /// # Examples + /// /// ``` /// use actix_web::test::TestRequest; /// @@ -171,19 +172,19 @@ impl TestRequest { self } - /// Set peer addr. + /// Sets peer address. pub fn peer_addr(mut self, addr: SocketAddr) -> Self { self.peer_addr = Some(addr); self } - /// Set request payload. + /// Sets request payload. pub fn set_payload(mut self, data: impl Into) -> Self { self.req.set_payload(data); self } - /// Serialize `data` to a URL encoded form and set it as the request payload. 
+ /// Serializes `data` to a URL encoded form and set it as the request payload. /// /// The `Content-Type` header is set to `application/x-www-form-urlencoded`. pub fn set_form(mut self, data: impl Serialize) -> Self { @@ -194,7 +195,7 @@ impl TestRequest { self } - /// Serialize `data` to JSON and set it as the request payload. + /// Serializes `data` to JSON and set it as the request payload. /// /// The `Content-Type` header is set to `application/json`. pub fn set_json(mut self, data: impl Serialize) -> Self { @@ -204,27 +205,33 @@ impl TestRequest { self } - /// Set application data. This is equivalent of `App::data()` method - /// for testing purpose. - pub fn data(mut self, data: T) -> Self { - self.app_data.insert(Data::new(data)); - self - } - - /// Set application data. This is equivalent of `App::app_data()` method - /// for testing purpose. + /// Inserts application data. + /// + /// This is equivalent of `App::app_data()` method for testing purpose. pub fn app_data(mut self, data: T) -> Self { self.app_data.insert(data); self } + /// Inserts application data. + /// + /// This is equivalent of `App::data()` method for testing purpose. + #[doc(hidden)] + pub fn data(mut self, data: T) -> Self { + self.app_data.insert(Data::new(data)); + self + } + + /// Sets resource map. #[cfg(test)] - /// Set request config pub(crate) fn rmap(mut self, rmap: ResourceMap) -> Self { self.rmap = rmap; self } + /// Finalizes test request. + /// + /// This request builder will be useless after calling `finish()`. fn finish(&mut self) -> Request { // mut used when cookie feature is enabled #[allow(unused_mut)] @@ -251,14 +258,14 @@ impl TestRequest { req } - /// Complete request creation and generate `Request` instance + /// Finalizes request creation and returns `Request` instance. pub fn to_request(mut self) -> Request { let mut req = self.finish(); req.head_mut().peer_addr = self.peer_addr; req } - /// Complete request creation and generate `ServiceRequest` instance + /// Finalizes request creation and returns `ServiceRequest` instance. pub fn to_srv_request(mut self) -> ServiceRequest { let (mut head, payload) = self.finish().into_parts(); head.peer_addr = self.peer_addr; @@ -279,12 +286,12 @@ impl TestRequest { ) } - /// Complete request creation and generate `ServiceResponse` instance + /// Finalizes request creation and returns `ServiceResponse` instance. pub fn to_srv_response(self, res: HttpResponse) -> ServiceResponse { self.to_srv_request().into_response(res) } - /// Complete request creation and generate `HttpRequest` instance + /// Finalizes request creation and returns `HttpRequest` instance. pub fn to_http_request(mut self) -> HttpRequest { let (mut head, _) = self.finish().into_parts(); head.peer_addr = self.peer_addr; @@ -302,7 +309,7 @@ impl TestRequest { ) } - /// Complete request creation and generate `HttpRequest` and `Payload` instances + /// Finalizes request creation and returns `HttpRequest` and `Payload` pair. pub fn to_http_parts(mut self) -> (HttpRequest, Payload) { let (mut head, payload) = self.finish().into_parts(); head.peer_addr = self.peer_addr; @@ -322,7 +329,7 @@ impl TestRequest { (req, payload) } - /// Complete request creation, calls service and waits for response future completion. + /// Finalizes request creation, calls service, and waits for response future completion. 
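Putting the builder methods together, a typical test drives a request through an initialized service in one step (sketch only; the handler and form type are illustrative, and serde's derive feature is assumed):

```rust
use actix_web::{test, web, App, HttpResponse};
use serde::Serialize;

#[derive(Serialize)]
struct LoginForm {
    user: String,
}

#[actix_web::test]
async fn login_form_accepted() {
    let app = test::init_service(
        App::new().route("/login", web::post().to(HttpResponse::Ok)),
    )
    .await;

    // set_form serializes the struct and sets the urlencoded content type;
    // send_request then calls the service and awaits the response.
    let res = test::TestRequest::post()
        .uri("/login")
        .set_form(LoginForm { user: "alice".to_owned() })
        .send_request(&app)
        .await;

    assert!(res.status().is_success());
}
```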
pub async fn send_request(self, app: &S) -> S::Response where S: Service, Error = E>, @@ -343,7 +350,7 @@ mod tests { use std::time::SystemTime; use super::*; - use crate::{http::header, test::init_service, web, App, Error, HttpResponse, Responder}; + use crate::{http::header, test::init_service, web, App, Error, Responder}; #[actix_rt::test] async fn test_basics() { diff --git a/actix-web/src/thin_data.rs b/actix-web/src/thin_data.rs new file mode 100644 index 000000000..a9cd4e3a4 --- /dev/null +++ b/actix-web/src/thin_data.rs @@ -0,0 +1,121 @@ +use std::any::type_name; + +use actix_utils::future::{ready, Ready}; + +use crate::{dev::Payload, error, FromRequest, HttpRequest}; + +/// Application data wrapper and extractor for cheaply-cloned types. +/// +/// Similar to the [`Data`] wrapper but for `Clone`/`Copy` types that are already an `Arc` internally, +/// share state using some other means when cloned, or is otherwise static data that is very cheap +/// to clone. +/// +/// Unlike `Data`, this wrapper clones `T` during extraction. Therefore, it is the user's +/// responsibility to ensure that clones of `T` do actually share the same state, otherwise state +/// may be unexpectedly different across multiple requests. +/// +/// Note that if your type is literally an `Arc` then it's recommended to use the +/// [`Data::from(arc)`][data_from_arc] conversion instead. +/// +/// # Examples +/// +/// ``` +/// use actix_web::{ +/// web::{self, ThinData}, +/// App, HttpResponse, Responder, +/// }; +/// +/// // Use the `ThinData` extractor to access a database connection pool. +/// async fn index(ThinData(db_pool): ThinData) -> impl Responder { +/// // database action ... +/// +/// HttpResponse::Ok() +/// } +/// +/// # type DbPool = (); +/// let db_pool = DbPool::default(); +/// +/// App::new() +/// .app_data(ThinData(db_pool.clone())) +/// .service(web::resource("/").get(index)) +/// # ; +/// ``` +/// +/// [`Data`]: crate::web::Data +/// [data_from_arc]: crate::web::Data#impl-From>-for-Data +#[derive(Debug, Clone)] +pub struct ThinData(pub T); + +impl_more::impl_as_ref!(ThinData => T); +impl_more::impl_as_mut!(ThinData => T); +impl_more::impl_deref_and_mut!( in ThinData => T); + +impl FromRequest for ThinData { + type Error = crate::Error; + type Future = Ready>; + + #[inline] + fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future { + ready(req.app_data::().cloned().ok_or_else(|| { + log::debug!( + "Failed to extract `ThinData<{}>` for `{}` handler. For the ThinData extractor to work \ + correctly, wrap the data with `ThinData()` and pass it to `App::app_data()`. \ + Ensure that types align in both the set and retrieve calls.", + type_name::(), + req.match_name().unwrap_or(req.path()) + ); + + error::ErrorInternalServerError( + "Requested application data is not configured correctly. 
\ + View/enable debug logs for more details.", + ) + })) + } +} + +#[cfg(test)] +mod tests { + use std::sync::{Arc, Mutex}; + + use super::*; + use crate::{ + http::StatusCode, + test::{call_service, init_service, TestRequest}, + web, App, HttpResponse, + }; + + type TestT = Arc>; + + #[actix_rt::test] + async fn thin_data() { + let test_data = TestT::default(); + + let app = init_service(App::new().app_data(ThinData(test_data.clone())).service( + web::resource("/").to(|td: ThinData| { + *td.lock().unwrap() += 1; + HttpResponse::Ok() + }), + )) + .await; + + for _ in 0..3 { + let req = TestRequest::default().to_request(); + let resp = call_service(&app, req).await; + assert_eq!(resp.status(), StatusCode::OK); + } + + assert_eq!(*test_data.lock().unwrap(), 3); + } + + #[actix_rt::test] + async fn thin_data_missing() { + let app = init_service( + App::new().service(web::resource("/").to(|_: ThinData| HttpResponse::Ok())), + ) + .await; + + let req = TestRequest::default().to_request(); + let resp = call_service(&app, req).await; + assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR); + } +} diff --git a/actix-web/src/types/either.rs b/actix-web/src/types/either.rs index db244fd9a..7883e89f6 100644 --- a/actix-web/src/types/either.rs +++ b/actix-web/src/types/either.rs @@ -287,10 +287,7 @@ mod tests { use serde::{Deserialize, Serialize}; use super::*; - use crate::{ - test::TestRequest, - web::{Form, Json}, - }; + use crate::test::TestRequest; #[derive(Debug, Clone, Serialize, Deserialize)] struct TestForm { diff --git a/actix-web/src/types/form.rs b/actix-web/src/types/form.rs index 7096b1e9c..d6381b990 100644 --- a/actix-web/src/types/form.rs +++ b/actix-web/src/types/form.rs @@ -418,7 +418,7 @@ mod tests { use super::*; use crate::{ http::{ - header::{HeaderValue, CONTENT_LENGTH, CONTENT_TYPE}, + header::{HeaderValue, CONTENT_TYPE}, StatusCode, }, test::{assert_body_eq, TestRequest}, diff --git a/actix-web/src/types/html.rs b/actix-web/src/types/html.rs new file mode 100644 index 000000000..c370ee07b --- /dev/null +++ b/actix-web/src/types/html.rs @@ -0,0 +1,66 @@ +//! Semantic HTML responder. See [`Html`]. + +use crate::{ + http::{ + header::{self, ContentType, TryIntoHeaderValue}, + StatusCode, + }, + HttpRequest, HttpResponse, Responder, +}; + +/// Semantic HTML responder. +/// +/// When used as a responder, creates a 200 OK response, sets the correct HTML content type, and +/// uses the string passed to [`Html::new()`] as the body. +/// +/// ``` +/// # use actix_web::web::Html; +/// Html::new("
<p>Hello, World!</p>
") +/// # ; +/// ``` +#[derive(Debug, Clone, PartialEq, Hash)] +pub struct Html(String); + +impl Html { + /// Constructs a new `Html` responder. + pub fn new(html: impl Into) -> Self { + Self(html.into()) + } +} + +impl Responder for Html { + type Body = String; + + fn respond_to(self, _req: &HttpRequest) -> HttpResponse { + let mut res = HttpResponse::with_body(StatusCode::OK, self.0); + res.headers_mut().insert( + header::CONTENT_TYPE, + ContentType::html().try_into_value().unwrap(), + ); + res + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::test::TestRequest; + + #[test] + fn responder() { + let req = TestRequest::default().to_http_request(); + + let res = Html::new("
<p>Hello, World!</p>
"); + let res = res.respond_to(&req); + + assert!(res.status().is_success()); + assert!(res + .headers() + .get(header::CONTENT_TYPE) + .unwrap() + .to_str() + .unwrap() + .starts_with("text/html")); + assert!(res.body().starts_with("
<p>
")); + } +} diff --git a/actix-web/src/types/mod.rs b/actix-web/src/types/mod.rs index 792edd650..cabe53d6a 100644 --- a/actix-web/src/types/mod.rs +++ b/actix-web/src/types/mod.rs @@ -3,6 +3,7 @@ mod either; mod form; mod header; +mod html; mod json; mod path; mod payload; @@ -13,6 +14,7 @@ pub use self::{ either::Either, form::{Form, FormConfig, UrlEncoded}, header::Header, + html::Html, json::{Json, JsonBody, JsonConfig}, path::{Path, PathConfig}, payload::{Payload, PayloadConfig}, diff --git a/actix-web/src/types/payload.rs b/actix-web/src/types/payload.rs index abb4e6b7f..e4db37d0b 100644 --- a/actix-web/src/types/payload.rs +++ b/actix-web/src/types/payload.rs @@ -440,13 +440,11 @@ impl Future for HttpMessageBody { #[cfg(test)] mod tests { - use bytes::Bytes; - use super::*; use crate::{ - http::{header, StatusCode}, + http::StatusCode, test::{call_service, init_service, read_body, TestRequest}, - web, App, Responder, + App, Responder, }; #[actix_rt::test] diff --git a/actix-web/src/web.rs b/actix-web/src/web.rs index 204313752..3a4c46730 100644 --- a/actix-web/src/web.rs +++ b/actix-web/src/web.rs @@ -2,6 +2,7 @@ //! //! # Request Extractors //! - [`Data`]: Application data item +//! - [`ThinData`]: Cheap-to-clone application data item //! - [`ReqData`]: Request-local data item //! - [`Path`]: URL path parameters / dynamic segments //! - [`Query`]: URL query parameters @@ -22,7 +23,8 @@ use actix_router::IntoPatterns; pub use bytes::{Buf, BufMut, Bytes, BytesMut}; pub use crate::{ - config::ServiceConfig, data::Data, redirect::Redirect, request_data::ReqData, types::*, + config::ServiceConfig, data::Data, redirect::Redirect, request_data::ReqData, + thin_data::ThinData, types::*, }; use crate::{ error::BlockingError, http::Method, service::WebService, FromRequest, Handler, Resource, diff --git a/actix-web/tests/test_httpserver.rs b/actix-web/tests/test_httpserver.rs index 86e0575f3..039c0ffbc 100644 --- a/actix-web/tests/test_httpserver.rs +++ b/actix-web/tests/test_httpserver.rs @@ -64,9 +64,11 @@ fn ssl_acceptor() -> openssl::ssl::SslAcceptorBuilder { x509::X509, }; - let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap(); - let cert_file = cert.serialize_pem().unwrap(); - let key_file = cert.serialize_private_key_pem(); + let rcgen::CertifiedKey { cert, key_pair } = + rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap(); + let cert_file = cert.pem(); + let key_file = key_pair.serialize_pem(); + let cert = X509::from_pem(cert_file.as_bytes()).unwrap(); let key = PKey::private_key_from_pem(key_file.as_bytes()).unwrap(); diff --git a/actix-web/tests/test_server.rs b/actix-web/tests/test_server.rs index a268cb6e3..960cf1e2b 100644 --- a/actix-web/tests/test_server.rs +++ b/actix-web/tests/test_server.rs @@ -1,6 +1,6 @@ #[cfg(feature = "openssl")] extern crate tls_openssl as openssl; -#[cfg(feature = "rustls-0_21")] +#[cfg(feature = "rustls-0_23")] extern crate tls_rustls as rustls; use std::{ @@ -34,9 +34,11 @@ const STR: &str = const_str::repeat!(S, 100); #[cfg(feature = "openssl")] fn openssl_config() -> SslAcceptor { - let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap(); - let cert_file = cert.serialize_pem().unwrap(); - let key_file = cert.serialize_private_key_pem(); + let rcgen::CertifiedKey { cert, key_pair } = + rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap(); + let cert_file = cert.pem(); + let key_file = key_pair.serialize_pem(); + let cert = 
X509::from_pem(cert_file.as_bytes()).unwrap(); let key = PKey::private_key_from_pem(key_file.as_bytes()).unwrap(); @@ -704,34 +706,32 @@ async fn test_brotli_encoding_large_openssl() { srv.stop().await; } -#[cfg(feature = "rustls-0_21")] +#[cfg(feature = "rustls-0_23")] mod plus_rustls { use std::io::BufReader; - use rustls::{Certificate, PrivateKey, ServerConfig as RustlsServerConfig}; + use rustls::{pki_types::PrivateKeyDer, ServerConfig as RustlsServerConfig}; use rustls_pemfile::{certs, pkcs8_private_keys}; use super::*; fn tls_config() -> RustlsServerConfig { - let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap(); - let cert_file = cert.serialize_pem().unwrap(); - let key_file = cert.serialize_private_key_pem(); + let rcgen::CertifiedKey { cert, key_pair } = + rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap(); + let cert_file = cert.pem(); + let key_file = key_pair.serialize_pem(); let cert_file = &mut BufReader::new(cert_file.as_bytes()); let key_file = &mut BufReader::new(key_file.as_bytes()); - let cert_chain = certs(cert_file) - .unwrap() - .into_iter() - .map(Certificate) - .collect(); - let mut keys = pkcs8_private_keys(key_file).unwrap(); + let cert_chain = certs(cert_file).collect::<Result<Vec<_>, _>>().unwrap(); + let mut keys = pkcs8_private_keys(key_file) + .collect::<Result<Vec<_>, _>>() + .unwrap(); RustlsServerConfig::builder() - .with_safe_defaults() .with_no_client_auth() - .with_single_cert(cert_chain, PrivateKey(keys.remove(0))) + .with_single_cert(cert_chain, PrivateKeyDer::Pkcs8(keys.remove(0))) .unwrap() } @@ -743,7 +743,7 @@ mod plus_rustls { .map(char::from) .collect::<String>(); - let srv = actix_test::start_with(actix_test::config().rustls_021(tls_config()), || { + let srv = actix_test::start_with(actix_test::config().rustls_0_23(tls_config()), || { App::new().service(web::resource("/").route(web::to(|bytes: Bytes| async { // echo decompressed request body back in response HttpResponse::Ok() diff --git a/awc/CHANGES.md b/awc/CHANGES.md index 3a1996cba..54c5e9869 100644 --- a/awc/CHANGES.md +++ b/awc/CHANGES.md @@ -2,6 +2,21 @@ ## Unreleased +## 3.5.0 + +- Add `rustls-0_23`, `rustls-0_23-webpki-roots`, and `rustls-0_23-native-roots` crate features. +- Add `awc::Connector::rustls_0_23()` constructor. +- Fix `rustls-0_22-native-roots` root store lookup. +- Update `brotli` dependency to `6`. +- Minimum supported Rust version (MSRV) is now 1.72. + +## 3.4.0 + +- Add `rustls-0_22-webpki-roots` and `rustls-0_22-native-roots` crate features. +- Add `awc::Connector::rustls_0_22()` method. + +## 3.3.0 + - Update `trust-dns-resolver` dependency to `0.23`. - Updated `zstd` dependency to `0.13`.
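As a rough illustration of the new `rustls-0_23` support listed in the changelog above, the snippet below sketches how a downstream crate might combine the new `awc::Connector::rustls_0_23()` constructor with the `webpki_roots_cert_store()` helper re-exported by `actix-tls`. This is a minimal sketch rather than code from this patch: it assumes the `rustls-0_23-webpki-roots` feature of awc is enabled, that `rustls` v0.23 is a direct dependency, and that a default rustls crypto provider is available at runtime; the request URL is a placeholder.

```rust
use std::sync::Arc;

use actix_tls::connect::rustls_0_23::webpki_roots_cert_store;
use rustls::ClientConfig; // rustls v0.23

#[actix_rt::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Root certificates from webpki-roots, no client auth -- mirrors the test
    // configuration used elsewhere in this diff.
    let mut config = ClientConfig::builder()
        .with_root_certificates(webpki_roots_cert_store())
        .with_no_client_auth();

    // Advertise ALPN protocols so HTTP/2 can be negotiated where the server supports it.
    config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec()];

    // Hand the config to awc through the new `Connector::rustls_0_23()` constructor.
    let client = awc::Client::builder()
        .connector(awc::Connector::new().rustls_0_23(Arc::new(config)))
        .finish();

    let res = client.get("https://www.rust-lang.org/").send().await?;
    println!("response status: {}", res.status());

    Ok(())
}
```

The ALPN list follows the guidance in the `rustls_0_23()` documentation added later in this diff; leaving it empty restricts the client to HTTP/1.1 over TLS.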
diff --git a/awc/Cargo.toml b/awc/Cargo.toml index 07811c979..353ca0d54 100644 --- a/awc/Cargo.toml +++ b/awc/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "awc" -version = "3.2.0" +version = "3.5.0" authors = ["Nikolay Kim "] description = "Async HTTP and WebSocket client library" keywords = ["actix", "http", "framework", "async", "web"] @@ -15,13 +15,40 @@ repository = "https://github.com/actix/actix-web" license = "MIT OR Apache-2.0" edition = "2021" -[lib] -name = "awc" -path = "src/lib.rs" - [package.metadata.docs.rs] -# features that docs.rs will build with -features = ["openssl", "rustls-0_20", "rustls-0_21", "compress-brotli", "compress-gzip", "compress-zstd", "cookies"] +rustdoc-args = ["--cfg", "docsrs"] +features = [ + "cookies", + "openssl", + "rustls-0_20", + "rustls-0_21", + "rustls-0_22-webpki-roots", + "rustls-0_23-webpki-roots", + "compress-brotli", + "compress-gzip", + "compress-zstd", +] + +[package.metadata.cargo_check_external_types] +allowed_external_types = [ + "actix_codec::*", + "actix_http::*", + "actix_rt::*", + "actix_service::*", + "actix_tls::*", + "bytes::*", + "cookie::*", + "cookie", + "futures_core::*", + "h2::*", + "http::*", + "openssl::*", + "rustls::*", + "serde_json::*", + "serde_urlencoded::*", + "serde::*", + "tokio::*", +] [features] default = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"] @@ -35,6 +62,16 @@ rustls = ["rustls-0_20"] rustls-0_20 = ["tls-rustls-0_20", "actix-tls/rustls-0_20"] # TLS via Rustls v0.21 rustls-0_21 = ["tls-rustls-0_21", "actix-tls/rustls-0_21"] +# TLS via Rustls v0.22 (WebPKI roots) +rustls-0_22-webpki-roots = ["tls-rustls-0_22", "actix-tls/rustls-0_22-webpki-roots"] +# TLS via Rustls v0.22 (Native roots) +rustls-0_22-native-roots = ["tls-rustls-0_22", "actix-tls/rustls-0_22-native-roots"] +# TLS via Rustls v0.23 +rustls-0_23 = ["tls-rustls-0_23", "actix-tls/rustls-0_23"] +# TLS via Rustls v0.23 (WebPKI roots) +rustls-0_23-webpki-roots = ["rustls-0_23", "actix-tls/rustls-0_23-webpki-roots"] +# TLS via Rustls v0.23 (Native roots) +rustls-0_23-native-roots = ["rustls-0_23", "actix-tls/rustls-0_23-native-roots"] # Brotli algorithm content-encoding support compress-brotli = ["actix-http/compress-brotli", "__compress"] @@ -44,7 +81,7 @@ compress-gzip = ["actix-http/compress-gzip", "__compress"] compress-zstd = ["actix-http/compress-zstd", "__compress"] # Cookie parsing and cookie jar -cookies = ["cookie"] +cookies = ["dep:cookie"] # Use `trust-dns-resolver` crate as DNS resolver trust-dns = ["trust-dns-resolver"] @@ -61,18 +98,18 @@ dangerous-h2c = [] [dependencies] actix-codec = "0.5" actix-service = "2" -actix-http = { version = "3.4", features = ["http2", "ws"] } +actix-http = { version = "3.7", features = ["http2", "ws"] } actix-rt = { version = "2.1", default-features = false } -actix-tls = { version = "3.1", features = ["connect", "uri"] } +actix-tls = { version = "3.4", features = ["connect", "uri"] } actix-utils = "3" -base64 = "0.21" +base64 = "0.22" bytes = "1" cfg-if = "1" derive_more = "0.99.5" futures-core = { version = "0.3.17", default-features = false, features = ["alloc"] } futures-util = { version = "0.3.17", default-features = false, features = ["alloc", "sink"] } -h2 = "0.3.17" +h2 = "0.3.26" http = "0.2.7" itoa = "1" log =" 0.4" @@ -90,29 +127,35 @@ cookie = { version = "0.16", features = ["percent-encode"], optional = true } tls-openssl = { package = "openssl", version = "0.10.55", optional = true } tls-rustls-0_20 = { package = "rustls", version = "0.20", optional = true, features = 
["dangerous_configuration"] } tls-rustls-0_21 = { package = "rustls", version = "0.21", optional = true, features = ["dangerous_configuration"] } +tls-rustls-0_22 = { package = "rustls", version = "0.22", optional = true } +tls-rustls-0_23 = { package = "rustls", version = "0.23", optional = true, default-features = false } trust-dns-resolver = { version = "0.23", optional = true } [dev-dependencies] -actix-http = { version = "3.4", features = ["openssl"] } +actix-http = { version = "3.7", features = ["openssl"] } actix-http-test = { version = "3", features = ["openssl"] } actix-server = "2" -actix-test = { version = "0.1", features = ["openssl", "rustls-0_21"] } -actix-tls = { version = "3", features = ["openssl", "rustls-0_21"] } +actix-test = { version = "0.1", features = ["openssl", "rustls-0_23"] } +actix-tls = { version = "3.4", features = ["openssl", "rustls-0_23"] } actix-utils = "3" actix-web = { version = "4", features = ["openssl"] } -brotli = "3.3.3" +brotli = "6" const-str = "0.5" -env_logger = "0.10" +env_logger = "0.11" flate2 = "1.0.13" futures-util = { version = "0.3.17", default-features = false } static_assertions = "1.1" -rcgen = "0.11" -rustls-pemfile = "1" +rcgen = "0.13" +rustls-pemfile = "2" tokio = { version = "1.24.2", features = ["rt-multi-thread", "macros"] } zstd = "0.13" +tls-rustls-0_23 = { package = "rustls", version = "0.23" } # add rustls 0.23 with default features to make aws_lc_rs work in tests + +[lints] +workspace = true [[example]] name = "client" -required-features = ["rustls-0_21"] +required-features = ["rustls-0_23-webpki-roots"] diff --git a/awc/README.md b/awc/README.md index 1f31167eb..8e7b42812 100644 --- a/awc/README.md +++ b/awc/README.md @@ -1,20 +1,22 @@ -# awc (Actix Web Client) +# `awc` (Actix Web Client) > Async HTTP and WebSocket client library. 
+ + [![crates.io](https://img.shields.io/crates/v/awc?label=latest)](https://crates.io/crates/awc) -[![Documentation](https://docs.rs/awc/badge.svg?version=3.2.0)](https://docs.rs/awc/3.2.0) +[![Documentation](https://docs.rs/awc/badge.svg?version=3.5.0)](https://docs.rs/awc/3.5.0) ![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/awc) -[![Dependency Status](https://deps.rs/crate/awc/3.2.0/status.svg)](https://deps.rs/crate/awc/3.2.0) +[![Dependency Status](https://deps.rs/crate/awc/3.5.0/status.svg)](https://deps.rs/crate/awc/3.5.0) [![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x) -## Documentation & Resources + -- [API Documentation](https://docs.rs/awc) -- [Example Project](https://github.com/actix/examples/tree/master/https-tls/awc-https) -- Minimum Supported Rust Version (MSRV): 1.68 +## Examples -## Example +[Example project using TLS-enabled client →](https://github.com/actix/examples/tree/master/https-tls/awc-https) + +Basic usage: ```rust use actix_rt::System; diff --git a/awc/examples/client.rs b/awc/examples/client.rs index 16ad330b8..41626315c 100644 --- a/awc/examples/client.rs +++ b/awc/examples/client.rs @@ -1,6 +1,8 @@ use std::error::Error as StdError; -#[tokio::main] +/// If we want to make requests to addresses starting with `https`, we need to enable a TLS feature of awc; this example expects +/// `awc = { version = "3.5.0", features = ["rustls-0_23-webpki-roots"] }`. +#[actix_rt::main] async fn main() -> Result<(), Box<dyn StdError>> { env_logger::init_from_env(env_logger::Env::new().default_filter_or("info")); diff --git a/awc/src/any_body.rs b/awc/src/any_body.rs index 08f5cc25e..ef0edfb9e 100644 --- a/awc/src/any_body.rs +++ b/awc/src/any_body.rs @@ -163,6 +163,7 @@ mod tests { use super::*; + #[allow(dead_code)] struct PinType(PhantomPinned); impl MessageBody for PinType { diff --git a/awc/src/builder.rs b/awc/src/builder.rs index a54960382..5aae394f8 100644 --- a/awc/src/builder.rs +++ b/awc/src/builder.rs @@ -37,6 +37,12 @@ pub struct ClientBuilder { } impl ClientBuilder { + /// Create a new `ClientBuilder` with default settings. + /// + /// Note: If the `rustls-0_23` feature is enabled and neither `rustls-0_23-native-roots` nor + /// `rustls-0_23-webpki-roots` are enabled, this `ClientBuilder` will build without TLS. In order + /// to enable TLS in this scenario, a custom `Connector` _must_ be added to the builder before + /// finishing construction. #[allow(clippy::new_ret_no_self)] pub fn new() -> ClientBuilder< impl Service< diff --git a/awc/src/client/connection.rs b/awc/src/client/connection.rs index 5ed965bed..8164e2b59 100644 --- a/awc/src/client/connection.rs +++ b/awc/src/client/connection.rs @@ -380,8 +380,6 @@ mod test { use std::{ future::Future, net, - pin::Pin, - task::{Context, Poll}, time::{Duration, Instant}, }; diff --git a/awc/src/client/connector.rs b/awc/src/client/connector.rs index 879d1895b..f3d443070 100644 --- a/awc/src/client/connector.rs +++ b/awc/src/client/connector.rs @@ -40,14 +40,27 @@ enum OurTlsConnector { /// Provided because building the OpenSSL context on newer versions can be very slow. /// This prevents unnecessary calls to `.build()` while constructing the client connector. 
#[cfg(feature = "openssl")] - #[allow(dead_code)] // false positive; used in build_ssl + #[allow(dead_code)] // false positive; used in build_tls OpensslBuilder(actix_tls::connect::openssl::reexports::SslConnectorBuilder), #[cfg(feature = "rustls-0_20")] + #[allow(dead_code)] // false positive; used in build_tls Rustls020(std::sync::Arc), #[cfg(feature = "rustls-0_21")] + #[allow(dead_code)] // false positive; used in build_tls Rustls021(std::sync::Arc), + + #[cfg(any( + feature = "rustls-0_22-webpki-roots", + feature = "rustls-0_22-native-roots", + ))] + #[allow(dead_code)] // false positive; used in build_tls + Rustls022(std::sync::Arc), + + #[cfg(feature = "rustls-0_23")] + #[allow(dead_code)] // false positive; used in build_tls + Rustls023(std::sync::Arc), } /// Manages HTTP client network connectivity. @@ -71,6 +84,14 @@ pub struct Connector { } impl Connector<()> { + /// Create a new connector with default TLS settings + /// + /// # Panics + /// + /// - When the `rustls-0_23-webpki-roots` or `rustls-0_23-native-roots` features are enabled + /// and no default crypto provider has been loaded, this method will panic. + /// - When the `rustls-0_23-native-roots` or `rustls-0_22-native-roots` features are enabled + /// and the runtime system has no native root certificates, this method will panic. #[allow(clippy::new_ret_no_self, clippy::let_unit_value)] pub fn new() -> Connector< impl Service< @@ -86,67 +107,105 @@ impl Connector<()> { } } - /// Provides an empty TLS connector when no TLS feature is enabled. - #[cfg(not(any(feature = "openssl", feature = "rustls-0_20", feature = "rustls-0_21")))] - fn build_tls(_: Vec>) -> OurTlsConnector { - OurTlsConnector::None - } + cfg_if::cfg_if! { + if #[cfg(any(feature = "rustls-0_23-webpki-roots", feature = "rustls-0_23-native-roots"))] { + /// Build TLS connector with Rustls v0.23, based on supplied ALPN protocols. + /// + /// Note that if other TLS crate features are enabled, Rustls v0.23 will be used. + fn build_tls(protocols: Vec>) -> OurTlsConnector { + use actix_tls::connect::rustls_0_23::{self, reexports::ClientConfig}; - /// Build TLS connector with Rustls v0.21, based on supplied ALPN protocols - /// - /// Note that if other TLS crate features are enabled, Rustls v0.21 will be used. - #[cfg(feature = "rustls-0_21")] - fn build_tls(protocols: Vec>) -> OurTlsConnector { - use actix_tls::connect::rustls_0_21::{reexports::ClientConfig, webpki_roots_cert_store}; + cfg_if::cfg_if! { + if #[cfg(feature = "rustls-0_23-webpki-roots")] { + let certs = rustls_0_23::webpki_roots_cert_store(); + } else if #[cfg(feature = "rustls-0_23-native-roots")] { + let certs = rustls_0_23::native_roots_cert_store().expect("Failed to find native root certificates"); + } + } - let mut config = ClientConfig::builder() - .with_safe_defaults() - .with_root_certificates(webpki_roots_cert_store()) - .with_no_client_auth(); + let mut config = ClientConfig::builder() + .with_root_certificates(certs) + .with_no_client_auth(); - config.alpn_protocols = protocols; + config.alpn_protocols = protocols; - OurTlsConnector::Rustls021(std::sync::Arc::new(config)) - } + OurTlsConnector::Rustls023(std::sync::Arc::new(config)) + } + } else if #[cfg(any(feature = "rustls-0_22-webpki-roots", feature = "rustls-0_22-native-roots"))] { + /// Build TLS connector with Rustls v0.22, based on supplied ALPN protocols. 
+ fn build_tls(protocols: Vec>) -> OurTlsConnector { + use actix_tls::connect::rustls_0_22::{self, reexports::ClientConfig}; - /// Build TLS connector with Rustls v0.20, based on supplied ALPN protocols - /// - /// Note that if other TLS crate features are enabled, Rustls v0.21 will be used. - #[cfg(all(feature = "rustls-0_20", not(feature = "rustls-0_21")))] - fn build_tls(protocols: Vec>) -> OurTlsConnector { - use actix_tls::connect::rustls_0_20::{reexports::ClientConfig, webpki_roots_cert_store}; + cfg_if::cfg_if! { + if #[cfg(feature = "rustls-0_22-webpki-roots")] { + let certs = rustls_0_22::webpki_roots_cert_store(); + } else if #[cfg(feature = "rustls-0_22-native-roots")] { + let certs = rustls_0_22::native_roots_cert_store().expect("Failed to find native root certificates"); + } + } - let mut config = ClientConfig::builder() - .with_safe_defaults() - .with_root_certificates(webpki_roots_cert_store()) - .with_no_client_auth(); + let mut config = ClientConfig::builder() + .with_root_certificates(certs) + .with_no_client_auth(); - config.alpn_protocols = protocols; + config.alpn_protocols = protocols; - OurTlsConnector::Rustls020(std::sync::Arc::new(config)) - } + OurTlsConnector::Rustls022(std::sync::Arc::new(config)) + } + } else if #[cfg(feature = "rustls-0_21")] { + /// Build TLS connector with Rustls v0.21, based on supplied ALPN protocols. + fn build_tls(protocols: Vec>) -> OurTlsConnector { + use actix_tls::connect::rustls_0_21::{reexports::ClientConfig, webpki_roots_cert_store}; - /// Build TLS connector with OpenSSL, based on supplied ALPN protocols - #[cfg(all( - feature = "openssl", - not(any(feature = "rustls-0_20", feature = "rustls-0_21")), - ))] - fn build_tls(protocols: Vec>) -> OurTlsConnector { - use actix_tls::connect::openssl::reexports::{SslConnector, SslMethod}; - use bytes::{BufMut, BytesMut}; + let mut config = ClientConfig::builder() + .with_safe_defaults() + .with_root_certificates(webpki_roots_cert_store()) + .with_no_client_auth(); - let mut alpn = BytesMut::with_capacity(20); - for proto in &protocols { - alpn.put_u8(proto.len() as u8); - alpn.put(proto.as_slice()); + config.alpn_protocols = protocols; + + OurTlsConnector::Rustls021(std::sync::Arc::new(config)) + } + } else if #[cfg(feature = "rustls-0_20")] { + /// Build TLS connector with Rustls v0.20, based on supplied ALPN protocols. + fn build_tls(protocols: Vec>) -> OurTlsConnector { + use actix_tls::connect::rustls_0_20::{reexports::ClientConfig, webpki_roots_cert_store}; + + let mut config = ClientConfig::builder() + .with_safe_defaults() + .with_root_certificates(webpki_roots_cert_store()) + .with_no_client_auth(); + + config.alpn_protocols = protocols; + + OurTlsConnector::Rustls020(std::sync::Arc::new(config)) + } + } else if #[cfg(feature = "openssl")] { + /// Build TLS connector with OpenSSL, based on supplied ALPN protocols. + fn build_tls(protocols: Vec>) -> OurTlsConnector { + use actix_tls::connect::openssl::reexports::{SslConnector, SslMethod}; + use bytes::{BufMut, BytesMut}; + + let mut alpn = BytesMut::with_capacity(20); + for proto in &protocols { + alpn.put_u8(proto.len() as u8); + alpn.put(proto.as_slice()); + } + + let mut ssl = SslConnector::builder(SslMethod::tls()).unwrap(); + if let Err(err) = ssl.set_alpn_protos(&alpn) { + log::error!("Can not set ALPN protocol: {err:?}"); + } + + OurTlsConnector::OpensslBuilder(ssl) + } + } else { + /// Provides an empty TLS connector when no TLS feature is enabled, or when only the + /// `rustls-0_23` crate feature is enabled. 
+ fn build_tls(_: Vec>) -> OurTlsConnector { + OurTlsConnector::None + } } - - let mut ssl = SslConnector::builder(SslMethod::tls()).unwrap(); - if let Err(err) = ssl.set_alpn_protos(&alpn) { - log::error!("Can not set ALPN protocol: {:?}", err); - } - - OurTlsConnector::OpensslBuilder(ssl) } } @@ -240,6 +299,37 @@ where self } + /// Sets custom Rustls v0.22 `ClientConfig` instance. + #[cfg(any( + feature = "rustls-0_22-webpki-roots", + feature = "rustls-0_22-native-roots", + ))] + pub fn rustls_0_22( + mut self, + connector: std::sync::Arc, + ) -> Self { + self.tls = OurTlsConnector::Rustls022(connector); + self + } + + /// Sets custom Rustls v0.23 `ClientConfig` instance. + /// + /// In order to enable ALPN, set the `.alpn_protocols` field on the ClientConfig to the + /// following: + /// + /// ```no_run + /// vec![b"h2".to_vec(), b"http/1.1".to_vec()] + /// # ; + /// ``` + #[cfg(feature = "rustls-0_23")] + pub fn rustls_0_23( + mut self, + connector: std::sync::Arc, + ) -> Self { + self.tls = OurTlsConnector::Rustls023(connector); + self + } + /// Sets maximum supported HTTP major version. /// /// Supported versions are HTTP/1.1 and HTTP/2. @@ -363,6 +453,7 @@ where use actix_tls::connect::Connection; use actix_utils::future::{ready, Ready}; + #[allow(non_local_definitions)] impl IntoConnectionIo for TcpConnection> { fn into_connection_io(self) -> (Box, Protocol) { let io = self.into_parts().0; @@ -413,6 +504,7 @@ where use actix_tls::connect::openssl::{reexports::AsyncSslStream, TlsConnector}; + #[allow(non_local_definitions)] impl IntoConnectionIo for TcpConnection> { fn into_connection_io(self) -> (Box, Protocol) { let sock = self.into_parts().0; @@ -450,6 +542,7 @@ where use actix_tls::connect::rustls_0_20::{reexports::AsyncTlsStream, TlsConnector}; + #[allow(non_local_definitions)] impl IntoConnectionIo for TcpConnection> { fn into_connection_io(self) -> (Box, Protocol) { let sock = self.into_parts().0; @@ -483,6 +576,78 @@ where use actix_tls::connect::rustls_0_21::{reexports::AsyncTlsStream, TlsConnector}; + #[allow(non_local_definitions)] + impl IntoConnectionIo for TcpConnection> { + fn into_connection_io(self) -> (Box, Protocol) { + let sock = self.into_parts().0; + let h2 = sock + .get_ref() + .1 + .alpn_protocol() + .map_or(false, |protos| protos.windows(2).any(|w| w == H2)); + if h2 { + (Box::new(sock), Protocol::Http2) + } else { + (Box::new(sock), Protocol::Http1) + } + } + } + + let handshake_timeout = self.config.handshake_timeout; + + let tls_service = TlsConnectorService { + tcp_service: tcp_service_inner, + tls_service: TlsConnector::service(tls), + timeout: handshake_timeout, + }; + + Some(actix_service::boxed::rc_service(tls_service)) + } + + #[cfg(any( + feature = "rustls-0_22-webpki-roots", + feature = "rustls-0_22-native-roots", + ))] + OurTlsConnector::Rustls022(tls) => { + const H2: &[u8] = b"h2"; + + use actix_tls::connect::rustls_0_22::{reexports::AsyncTlsStream, TlsConnector}; + + #[allow(non_local_definitions)] + impl IntoConnectionIo for TcpConnection> { + fn into_connection_io(self) -> (Box, Protocol) { + let sock = self.into_parts().0; + let h2 = sock + .get_ref() + .1 + .alpn_protocol() + .map_or(false, |protos| protos.windows(2).any(|w| w == H2)); + if h2 { + (Box::new(sock), Protocol::Http2) + } else { + (Box::new(sock), Protocol::Http1) + } + } + } + + let handshake_timeout = self.config.handshake_timeout; + + let tls_service = TlsConnectorService { + tcp_service: tcp_service_inner, + tls_service: TlsConnector::service(tls), + timeout: 
handshake_timeout, + }; + + Some(actix_service::boxed::rc_service(tls_service)) + } + + #[cfg(feature = "rustls-0_23")] + OurTlsConnector::Rustls023(tls) => { + const H2: &[u8] = b"h2"; + + use actix_tls::connect::rustls_0_23::{reexports::AsyncTlsStream, TlsConnector}; + + #[allow(non_local_definitions)] impl IntoConnectionIo for TcpConnection> { fn into_connection_io(self) -> (Box, Protocol) { let sock = self.into_parts().0; @@ -570,6 +735,17 @@ where /// service for establish tcp connection and do client tls handshake. /// operation is canceled when timeout limit reached. +#[cfg(any( + feature = "dangerous-h2c", + feature = "openssl", + feature = "rustls-0_20", + feature = "rustls-0_21", + feature = "rustls-0_22-webpki-roots", + feature = "rustls-0_22-native-roots", + feature = "rustls-0_23", + feature = "rustls-0_23-webpki-roots", + feature = "rustls-0_23-native-roots" +))] struct TlsConnectorService { /// TCP connection is canceled on `TcpConnectorInnerService`'s timeout setting. tcp_service: Tcp, @@ -580,6 +756,15 @@ struct TlsConnectorService { timeout: Duration, } +#[cfg(any( + feature = "dangerous-h2c", + feature = "openssl", + feature = "rustls-0_20", + feature = "rustls-0_21", + feature = "rustls-0_22-webpki-roots", + feature = "rustls-0_22-native-roots", + feature = "rustls-0_23", +))] impl Service for TlsConnectorService where Tcp: @@ -861,7 +1046,6 @@ mod resolver { use std::{cell::RefCell, net::SocketAddr}; use actix_tls::connect::Resolve; - use futures_core::future::LocalBoxFuture; use trust_dns_resolver::{ config::{ResolverConfig, ResolverOpts}, system_conf::read_system_conf, @@ -896,7 +1080,7 @@ mod resolver { // resolver struct is cached in thread local so new clients can reuse the existing instance thread_local! { - static TRUST_DNS_RESOLVER: RefCell> = RefCell::new(None); + static TRUST_DNS_RESOLVER: RefCell> = const { RefCell::new(None) }; } // get from thread local or construct a new trust-dns resolver. diff --git a/awc/src/client/pool.rs b/awc/src/client/pool.rs index 2cf1f3ace..4c439e4eb 100644 --- a/awc/src/client/pool.rs +++ b/awc/src/client/pool.rs @@ -173,12 +173,15 @@ where }; // acquire an owned permit and carry it with connection - let permit = inner.permits.clone().acquire_owned().await.map_err(|_| { - ConnectError::Io(io::Error::new( - io::ErrorKind::Other, - "failed to acquire semaphore on client connection pool", - )) - })?; + let permit = Arc::clone(&inner.permits) + .acquire_owned() + .await + .map_err(|_| { + ConnectError::Io(io::Error::new( + io::ErrorKind::Other, + "failed to acquire semaphore on client connection pool", + )) + })?; let conn = { let mut conn = None; @@ -374,12 +377,11 @@ impl Acquired { #[cfg(test)] mod test { - use std::{cell::Cell, io}; + use std::cell::Cell; use http::Uri; use super::*; - use crate::client::connection::ConnectionType; /// A stream type that always returns pending on async read. /// diff --git a/awc/src/frozen.rs b/awc/src/frozen.rs index 8f3244997..90b2c6efd 100644 --- a/awc/src/frozen.rs +++ b/awc/src/frozen.rs @@ -49,7 +49,7 @@ impl FrozenClientRequest { where B: MessageBody + 'static, { - RequestSender::Rc(self.head.clone(), None).send_body( + RequestSender::Rc(Rc::clone(&self.head), None).send_body( self.addr, self.response_decompress, self.timeout, @@ -60,7 +60,7 @@ impl FrozenClientRequest { /// Send a json body. 
pub fn send_json(&self, value: &T) -> SendClientRequest { - RequestSender::Rc(self.head.clone(), None).send_json( + RequestSender::Rc(Rc::clone(&self.head), None).send_json( self.addr, self.response_decompress, self.timeout, @@ -71,7 +71,7 @@ impl FrozenClientRequest { /// Send an urlencoded body. pub fn send_form(&self, value: &T) -> SendClientRequest { - RequestSender::Rc(self.head.clone(), None).send_form( + RequestSender::Rc(Rc::clone(&self.head), None).send_form( self.addr, self.response_decompress, self.timeout, @@ -86,7 +86,7 @@ impl FrozenClientRequest { S: Stream> + 'static, E: Into + 'static, { - RequestSender::Rc(self.head.clone(), None).send_stream( + RequestSender::Rc(Rc::clone(&self.head), None).send_stream( self.addr, self.response_decompress, self.timeout, @@ -97,7 +97,7 @@ impl FrozenClientRequest { /// Send an empty body. pub fn send(&self) -> SendClientRequest { - RequestSender::Rc(self.head.clone(), None).send( + RequestSender::Rc(Rc::clone(&self.head), None).send( self.addr, self.response_decompress, self.timeout, diff --git a/awc/src/lib.rs b/awc/src/lib.rs index 253b5161a..b582d51e4 100644 --- a/awc/src/lib.rs +++ b/awc/src/lib.rs @@ -100,8 +100,7 @@ //! # } //! ``` -#![deny(rust_2018_idioms, nonstandard_style)] -#![warn(future_incompatible)] +#![allow(unknown_lints)] // temp: #[allow(non_local_definitions)] #![allow( clippy::type_complexity, clippy::borrow_interior_mutable_const, diff --git a/awc/src/middleware/redirect.rs b/awc/src/middleware/redirect.rs index c38d6ad92..b2cf9c45b 100644 --- a/awc/src/middleware/redirect.rs +++ b/awc/src/middleware/redirect.rs @@ -78,7 +78,7 @@ where RedirectServiceFuture::Tunnel { fut } } ConnectRequest::Client(head, body, addr) => { - let connector = self.connector.clone(); + let connector = Rc::clone(&self.connector); let max_redirect_times = self.max_redirect_times; // backup the uri and method for reuse schema and authority. 
@@ -303,10 +303,7 @@ mod tests { use actix_web::{web, App, Error, HttpRequest, HttpResponse}; use super::*; - use crate::{ - http::{header::HeaderValue, StatusCode}, - ClientBuilder, - }; + use crate::{http::header::HeaderValue, ClientBuilder}; #[actix_rt::test] async fn basic_redirect() { diff --git a/awc/src/responses/json_body.rs b/awc/src/responses/json_body.rs index 3912324b6..e9c03d81a 100644 --- a/awc/src/responses/json_body.rs +++ b/awc/src/responses/json_body.rs @@ -118,7 +118,7 @@ mod tests { use static_assertions::assert_impl_all; use super::*; - use crate::{http::header, test::TestResponse}; + use crate::test::TestResponse; assert_impl_all!(JsonBody: Unpin); diff --git a/awc/src/responses/response_body.rs b/awc/src/responses/response_body.rs index 8d9d1274a..0ff58341f 100644 --- a/awc/src/responses/response_body.rs +++ b/awc/src/responses/response_body.rs @@ -110,7 +110,7 @@ mod tests { use static_assertions::assert_impl_all; use super::*; - use crate::{http::header, test::TestResponse}; + use crate::test::TestResponse; assert_impl_all!(ResponseBody<()>: Unpin); diff --git a/awc/src/test.rs b/awc/src/test.rs index 96ae1f0a1..126583179 100644 --- a/awc/src/test.rs +++ b/awc/src/test.rs @@ -103,7 +103,7 @@ mod tests { use actix_http::header::HttpDate; use super::*; - use crate::{cookie, http::header}; + use crate::http::header; #[test] fn test_basics() { diff --git a/awc/tests/test_connector.rs b/awc/tests/test_connector.rs index b3eb97367..a8b7e98c1 100644 --- a/awc/tests/test_connector.rs +++ b/awc/tests/test_connector.rs @@ -13,9 +13,11 @@ use openssl::{ }; fn tls_config() -> SslAcceptor { - let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap(); - let cert_file = cert.serialize_pem().unwrap(); - let key_file = cert.serialize_private_key_pem(); + let rcgen::CertifiedKey { cert, key_pair } = + rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap(); + let cert_file = cert.pem(); + let key_file = key_pair.serialize_pem(); + let cert = X509::from_pem(cert_file.as_bytes()).unwrap(); let key = PKey::private_key_from_pem(key_file.as_bytes()).unwrap(); diff --git a/awc/tests/test_rustls_client.rs b/awc/tests/test_rustls_client.rs index d758f93d8..7e832f67d 100644 --- a/awc/tests/test_rustls_client.rs +++ b/awc/tests/test_rustls_client.rs @@ -1,6 +1,6 @@ -#![cfg(feature = "rustls-0_21")] +#![cfg(feature = "rustls-0_23-webpki-roots")] -extern crate tls_rustls_0_21 as rustls; +extern crate tls_rustls_0_23 as rustls; use std::{ io::BufReader, @@ -8,59 +8,85 @@ use std::{ atomic::{AtomicUsize, Ordering}, Arc, }, - time::SystemTime, }; use actix_http::HttpService; use actix_http_test::test_server; use actix_service::{fn_service, map_config, ServiceFactoryExt}; -use actix_tls::connect::rustls_0_21::webpki_roots_cert_store; +use actix_tls::connect::rustls_0_23::webpki_roots_cert_store; use actix_utils::future::ok; use actix_web::{dev::AppConfig, http::Version, web, App, HttpResponse}; use rustls::{ - client::{ServerCertVerified, ServerCertVerifier}, - Certificate, ClientConfig, PrivateKey, ServerConfig, ServerName, + pki_types::{CertificateDer, PrivateKeyDer, ServerName}, + ClientConfig, ServerConfig, }; use rustls_pemfile::{certs, pkcs8_private_keys}; fn tls_config() -> ServerConfig { - let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap(); - let cert_file = cert.serialize_pem().unwrap(); - let key_file = cert.serialize_private_key_pem(); + let rcgen::CertifiedKey { cert, key_pair } = + 
rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap(); + let cert_file = cert.pem(); + let key_file = key_pair.serialize_pem(); let cert_file = &mut BufReader::new(cert_file.as_bytes()); let key_file = &mut BufReader::new(key_file.as_bytes()); - let cert_chain = certs(cert_file) - .unwrap() - .into_iter() - .map(Certificate) - .collect(); - let mut keys = pkcs8_private_keys(key_file).unwrap(); + let cert_chain = certs(cert_file).collect::<Result<Vec<_>, _>>().unwrap(); + let mut keys = pkcs8_private_keys(key_file) + .collect::<Result<Vec<_>, _>>() + .unwrap(); ServerConfig::builder() - .with_safe_defaults() .with_no_client_auth() - .with_single_cert(cert_chain, PrivateKey(keys.remove(0))) + .with_single_cert(cert_chain, PrivateKeyDer::Pkcs8(keys.remove(0))) .unwrap() } mod danger { + use rustls::{ + client::danger::{ServerCertVerified, ServerCertVerifier}, + pki_types::UnixTime, + }; + use super::*; + #[derive(Debug)] pub struct NoCertificateVerification; impl ServerCertVerifier for NoCertificateVerification { fn verify_server_cert( &self, - _end_entity: &Certificate, - _intermediates: &[Certificate], - _server_name: &ServerName, - _scts: &mut dyn Iterator<Item = &[u8]>, + _end_entity: &CertificateDer<'_>, + _intermediates: &[CertificateDer<'_>], + _server_name: &ServerName<'_>, _ocsp_response: &[u8], - _now: SystemTime, + _now: UnixTime, ) -> Result<ServerCertVerified, rustls::Error> { - Ok(ServerCertVerified::assertion()) + Ok(rustls::client::danger::ServerCertVerified::assertion()) + } + + fn verify_tls12_signature( + &self, + _message: &[u8], + _cert: &CertificateDer<'_>, + _dss: &rustls::DigitallySignedStruct, + ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> { + Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) + } + + fn verify_tls13_signature( + &self, + _message: &[u8], + _cert: &CertificateDer<'_>, + _dss: &rustls::DigitallySignedStruct, + ) -> Result<rustls::client::danger::HandshakeSignatureValid, rustls::Error> { + Ok(rustls::client::danger::HandshakeSignatureValid::assertion()) + } + + fn supported_verify_schemes(&self) -> Vec<rustls::SignatureScheme> { + rustls::crypto::aws_lc_rs::default_provider() + .signature_verification_algorithms + .supported_schemes() } } } @@ -82,14 +108,13 @@ async fn test_connection_reuse_h2() { App::new().service(web::resource("/").route(web::to(HttpResponse::Ok))), |_| AppConfig::default(), )) - .rustls_021(tls_config()) + .rustls_0_23(tls_config()) .map_err(|_| ()), ) }) .await; let mut config = ClientConfig::builder() - .with_safe_defaults() .with_root_certificates(webpki_roots_cert_store()) .with_no_client_auth(); @@ -102,7 +127,7 @@ async fn test_connection_reuse_h2() { .set_certificate_verifier(Arc::new(danger::NoCertificateVerification)); let client = awc::Client::builder() - .connector(awc::Connector::new().rustls_021(Arc::new(config))) + .connector(awc::Connector::new().rustls_0_23(Arc::new(config))) .finish(); // req 1 diff --git a/awc/tests/test_ssl_client.rs b/awc/tests/test_ssl_client.rs index 5273c3fff..95d4c15f1 100644 --- a/awc/tests/test_ssl_client.rs +++ b/awc/tests/test_ssl_client.rs @@ -19,9 +19,11 @@ use openssl::{ }; fn tls_config() -> SslAcceptor { - let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap(); - let cert_file = cert.serialize_pem().unwrap(); - let key_file = cert.serialize_private_key_pem(); + let rcgen::CertifiedKey { cert, key_pair } = + rcgen::generate_simple_self_signed(["localhost".to_owned()]).unwrap(); + let cert_file = cert.pem(); + let key_file = key_pair.serialize_pem(); + let cert = X509::from_pem(cert_file.as_bytes()).unwrap(); let key = PKey::private_key_from_pem(key_file.as_bytes()).unwrap(); diff --git a/justfile b/justfile index 
f2e449d85..5cd56b12e 100644 --- a/justfile +++ b/justfile @@ -1,11 +1,133 @@ _list: @just --list +# Format workspace. +fmt: + just --unstable --fmt + cargo +nightly fmt + fd --hidden --type=file --extension=md --extension=yml --exec-batch npx -y prettier --write + +# Downgrade dev-dependencies necessary to run MSRV checks/tests. +[private] +downgrade-for-msrv: + cargo update -p=clap --precise=4.4.18 + +msrv := ``` + cargo metadata --format-version=1 \ + | jq -r 'first(.packages[] | select(.source == null and .rust_version)) | .rust_version' \ + | sed -E 's/^1\.([0-9]{2})$/1\.\1\.0/' +``` +msrv_rustup := "+" + msrv +non_linux_all_features_list := ``` + cargo metadata --format-version=1 \ + | jq '.packages[] | select(.source == null) | .features | keys' \ + | jq -r --slurp \ + --arg exclusions "__tls,__compress,tokio-uring,io-uring,experimental-io-uring" \ + 'add | unique | . - ($exclusions | split(",")) | join(",")' +``` +all_crate_features := if os() == "linux" { "--all-features" } else { "--features='" + non_linux_all_features_list + "'" } + +[private] +check-min: + cargo hack --workspace check --no-default-features + +[private] +check-default: + cargo hack --workspace check + +# Run Clippy over workspace. +clippy toolchain="": + cargo {{ toolchain }} clippy --workspace --all-targets {{ all_crate_features }} + +# Test workspace using MSRV. +test-msrv: downgrade-for-msrv (test msrv_rustup) + +# Test workspace code. +test toolchain="": + cargo {{ toolchain }} test --lib --tests -p=actix-web-codegen --all-features + cargo {{ toolchain }} test --lib --tests -p=actix-multipart-derive --all-features + cargo {{ toolchain }} nextest run -p=actix-router --no-default-features + cargo {{ toolchain }} nextest run --workspace --exclude=actix-web-codegen --exclude=actix-multipart-derive {{ all_crate_features }} --filter-expr="not test(test_reading_deflate_encoding_large_random_rustls)" + +# Test workspace docs. +test-docs toolchain="": && doc + cargo {{ toolchain }} test --doc --workspace {{ all_crate_features }} --no-fail-fast -- --nocapture + +# Test workspace. +test-all toolchain="": (test toolchain) (test-docs toolchain) + +# Test workspace and collect coverage info. +[private] +test-coverage toolchain="": + cargo {{ toolchain }} llvm-cov nextest --no-report {{ all_crate_features }} + cargo {{ toolchain }} llvm-cov --doc --no-report {{ all_crate_features }} + +# Test workspace and generate Codecov report. +test-coverage-codecov toolchain="": (test-coverage toolchain) + cargo {{ toolchain }} llvm-cov report --doctests --codecov --output-path=codecov.json + +# Test workspace and generate LCOV report. +test-coverage-lcov toolchain="": (test-coverage toolchain) + cargo {{ toolchain }} llvm-cov report --doctests --lcov --output-path=lcov.info + # Document crates in workspace. -doc: - RUSTDOCFLAGS="--cfg=docsrs" cargo +nightly doc --no-deps --workspace --features=rustls,openssl +doc *args: && doc-set-workspace-crates + RUSTDOCFLAGS="--cfg=docsrs -Dwarnings" cargo +nightly doc --workspace {{ all_crate_features }} {{ args }} + +[private] +doc-set-workspace-crates: + #!/usr/bin/env bash + ( + echo "window.ALL_CRATES =" + cargo metadata --format-version=1 \ + | jq '[.packages[] | select(.source == null) | .targets | map(select(.doc) | .name)] | flatten' + echo ";" + ) > "$(cargo metadata --format-version=1 | jq -r '.target_directory')/doc/crates.js" # Document crates in workspace and watch for changes. 
doc-watch: - RUSTDOCFLAGS="--cfg=docsrs" cargo +nightly doc --no-deps --workspace --features=rustls,openssl --open - cargo watch -- RUSTDOCFLAGS="--cfg=docsrs" cargo +nightly doc --no-deps --workspace --features=rustls,openssl + @just doc --open + cargo watch -- just doc + +# Update READMEs from crate root documentation. +update-readmes: && fmt + cd ./actix-files && cargo rdme --force + cd ./actix-http-test && cargo rdme --force + cd ./actix-router && cargo rdme --force + cd ./actix-multipart && cargo rdme --force + cd ./actix-test && cargo rdme --force + +feature_combo_skip_list := if os() == "linux" { "__tls,__compress" } else { "__tls,__compress,experimental-io-uring" } + +# Checks compatibility of feature combinations. +check-feature-combinations: + cargo hack --workspace \ + --feature-powerset --depth=4 \ + --skip={{ feature_combo_skip_list }} \ + check + +# Check for unintentional external type exposure on all crates in workspace. +check-external-types-all toolchain="+nightly": + #!/usr/bin/env bash + set -euo pipefail + exit=0 + for f in $(find . -mindepth 2 -maxdepth 2 -name Cargo.toml | grep -vE "\-codegen/|\-derive/|\-macros/"); do + if ! just check-external-types-manifest "$f" {{ toolchain }}; then exit=1; fi + echo + echo + done + exit $exit + +# Check for unintentional external type exposure on all crates in workspace. +check-external-types-all-table toolchain="+nightly": + #!/usr/bin/env bash + set -euo pipefail + for f in $(find . -mindepth 2 -maxdepth 2 -name Cargo.toml | grep -vE "\-codegen/|\-derive/|\-macros/"); do + echo + echo "Checking for $f" + just check-external-types-manifest "$f" {{ toolchain }} --output-format=markdown-table + done + +# Check for unintentional external type exposure on a crate. +check-external-types-manifest manifest_path toolchain="+nightly" *extra_args="": + cargo {{ toolchain }} check-external-types --manifest-path "{{ manifest_path }}" {{ extra_args }} diff --git a/scripts/bump b/scripts/bump index 9d90542c6..7a57e6ed0 100755 --- a/scripts/bump +++ b/scripts/bump @@ -5,7 +5,7 @@ # requires github cli tool for automatic release draft creation -set -euo pipefail +set -eEuo pipefail DIR=$1 @@ -113,16 +113,23 @@ read -p "Update all references: (y/N) " UPDATE_REFERENCES UPDATE_REFERENCES="${UPDATE_REFERENCES:-n}" if [ "$UPDATE_REFERENCES" = 'y' ] || [ "$UPDATE_REFERENCES" = 'Y' ]; then + if [[ $NEW_VERSION == *".0.0" ]]; then + NEW_VERSION_SPEC="${NEW_VERSION%.0.0}" + elif [[ $NEW_VERSION == *".0" ]]; then + NEW_VERSION_SPEC="${NEW_VERSION%.0}" + else + NEW_VERSION_SPEC="$NEW_VERSION" + fi for f in $(fd Cargo.toml); do sed -i.bak -E \ - "s/^(${PACKAGE_NAME} ?= ?\")[^\"]+(\")$/\1${NEW_VERSION}\2/g" $f + "s/^(${PACKAGE_NAME} ?= ?\")[^\"]+(\")$/\1${NEW_VERSION_SPEC}\2/g" $f sed -i.bak -E \ - "s/^(${PACKAGE_NAME} ?=.*version ?= ?\")[^\"]+(\".*)$/\1${NEW_VERSION}\2/g" $f + "s/^(${PACKAGE_NAME} ?=.*version ?= ?\")[^\"]+(\".*)$/\1${NEW_VERSION_SPEC}\2/g" $f sed -i.bak -E \ - "s/^(.*package ?= ?\"${PACKAGE_NAME}\".*version ?= ?\")[^\"]+(\".*)$/\1${NEW_VERSION}\2/g" $f + "s/^(.*package ?= ?\"${PACKAGE_NAME}\".*version ?= ?\")[^\"]+(\".*)$/\1${NEW_VERSION_SPEC}\2/g" $f sed -i.bak -E \ - "s/^(.*version ?= ?\")[^\"]+(\".*package ?= ?\"${PACKAGE_NAME}\".*)$/\1${NEW_VERSION}\2/g" $f + "s/^(.*version ?= ?\")[^\"]+(\".*package ?= ?\"${PACKAGE_NAME}\".*)$/\1${NEW_VERSION_SPEC}\2/g" $f # remove backup file rm -f $f.bak @@ -132,6 +139,7 @@ fi if [ $MACOS ]; then printf "chore($PACKAGE_NAME): prepare release $NEW_VERSION" | pbcopy + echo "placed the recommended 
commit message on the clipboard" else echo echo "commit message:" @@ -161,3 +169,5 @@ if [ "$GH_RELEASE" = 'y' ] || [ "$GH_RELEASE" = 'Y' ]; then fi echo + +cargo update >/dev/null 2>&1 || true diff --git a/scripts/free-disk-space.sh b/scripts/free-disk-space.sh new file mode 100755 index 000000000..2946cfcf6 --- /dev/null +++ b/scripts/free-disk-space.sh @@ -0,0 +1,53 @@ +#!/usr/bin/env bash + +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# The Azure provided machines typically have the following disk allocation: +# Total space: 85GB +# Allocated: 67 GB +# Free: 17 GB +# This script frees up 28 GB of disk space by deleting unneeded packages and +# large directories. +# The Flink end to end tests download and generate more than 17 GB of files, +# causing unpredictable behavior and build failures. + +echo "==============================================================================" +echo "Freeing up disk space on CI system" +echo "==============================================================================" + +echo "Listing 100 largest packages" +dpkg-query -Wf '${Installed-Size}\t${Package}\n' | sort -n | tail -n 100 +df -h + +echo "Removing large packages" +sudo apt-get remove -y '^dotnet-.*' +sudo apt-get remove -y 'php.*' +sudo apt-get remove -y '^mongodb-.*' +sudo apt-get remove -y '^mysql-.*' +sudo apt-get remove -y azure-cli google-cloud-sdk hhvm google-chrome-stable firefox powershell mono-devel libgl1-mesa-dri +sudo apt-get autoremove -y +sudo apt-get clean +df -h + +echo "Removing large directories" +sudo rm -rf /usr/share/dotnet/ +sudo rm -rf /usr/local/graalvm/ +sudo rm -rf /usr/local/.ghcup/ +sudo rm -rf /usr/local/share/powershell +sudo rm -rf /usr/local/share/chromium +sudo rm -rf /usr/local/lib/android +sudo rm -rf /usr/local/lib/node_modules +df -h