mirror of https://github.com/fafhrd91/actix-web synced 2024-11-30 18:44:35 +01:00

Merge pull request #1 from actix/master

git pull
Committed by krircc on 2019-12-01 16:56:42 +08:00 (via GitHub), commit b7d44d6c4c
406 changed files with 52900 additions and 33540 deletions

appveyor.yml (modified)

@@ -1,43 +1,21 @@
 environment:
   global:
-    PROJECT_NAME: actix
+    PROJECT_NAME: actix-web
   matrix:
     # Stable channel
-    - TARGET: i686-pc-windows-gnu
-      CHANNEL: 1.21.0
-    - TARGET: i686-pc-windows-msvc
-      CHANNEL: 1.21.0
-    - TARGET: x86_64-pc-windows-gnu
-      CHANNEL: 1.21.0
-    - TARGET: x86_64-pc-windows-msvc
-      CHANNEL: 1.21.0
-    # Stable channel
-    - TARGET: i686-pc-windows-gnu
-      CHANNEL: stable
     - TARGET: i686-pc-windows-msvc
       CHANNEL: stable
     - TARGET: x86_64-pc-windows-gnu
       CHANNEL: stable
     - TARGET: x86_64-pc-windows-msvc
       CHANNEL: stable
-    # Beta channel
-    - TARGET: i686-pc-windows-gnu
-      CHANNEL: beta
-    - TARGET: i686-pc-windows-msvc
-      CHANNEL: beta
-    - TARGET: x86_64-pc-windows-gnu
-      CHANNEL: beta
-    - TARGET: x86_64-pc-windows-msvc
-      CHANNEL: beta
     # Nightly channel
-    - TARGET: i686-pc-windows-gnu
-      CHANNEL: nightly-2017-12-21
     - TARGET: i686-pc-windows-msvc
-      CHANNEL: nightly-2017-12-21
+      CHANNEL: nightly
     - TARGET: x86_64-pc-windows-gnu
-      CHANNEL: nightly-2017-12-21
+      CHANNEL: nightly
     - TARGET: x86_64-pc-windows-msvc
-      CHANNEL: nightly-2017-12-21
+      CHANNEL: nightly
 # Install Rust and Cargo
 # (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
@@ -59,4 +37,5 @@ build: false
 # Equivalent to Travis' `script` phase
 test_script:
-  - cargo test --no-default-features
+  - cargo clean
+  - cargo test --no-default-features --features="flate2-rust"

.travis.yml (modified)

@@ -1,25 +1,18 @@
 language: rust
-sudo: false
+sudo: required
 dist: trusty
 cache:
-  cargo: true
+  # cargo: true
   apt: true
 matrix:
   include:
-    - rust: 1.21.0
     - rust: stable
     - rust: beta
-    - rust: nightly
+    - rust: nightly-2019-11-20
   allow_failures:
-    - rust: nightly
+    - rust: nightly-2019-11-20
-#rust:
-#  - 1.21.0
-#  - stable
-#  - beta
-#  - nightly-2018-01-03
 env:
   global:
@@ -29,67 +22,40 @@ env:
 before_install:
   - sudo add-apt-repository -y ppa:0k53d-karl-f830m/openssl
   - sudo apt-get update -qq
-  - sudo apt-get install -qq libssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev
+  - sudo apt-get install -y openssl libssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev
+before_cache: |
+  if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-11-20" ]]; then
+    RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install --version 0.6.11 cargo-tarpaulin
+  fi
 # Add clippy
 before_script:
-  - |
-    if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then
-      ( ( cargo install clippy && export CLIPPY=true ) || export CLIPPY=false );
-    fi
   - export PATH=$PATH:~/.cargo/bin
 script:
+  - cargo update
+  - cargo check --all --no-default-features
   - |
-    if [[ "$TRAVIS_RUST_VERSION" == "stable" ]]; then
-      cargo clean
-      USE_SKEPTIC=1 cargo test --features=alpn
-    else
-      cargo clean
-      cargo test -- --nocapture
-      # --features=alpn
-    fi
-  - |
-    if [[ "$TRAVIS_RUST_VERSION" == "stable" ]]; then
-      cd examples/basics && cargo check && cd ../..
-      cd examples/hello-world && cargo check && cd ../..
-      cd examples/http-proxy && cargo check && cd ../..
-      cd examples/multipart && cargo check && cd ../..
-      cd examples/json && cargo check && cd ../..
-      cd examples/juniper && cargo check && cd ../..
-      cd examples/protobuf && cargo check && cd ../..
-      cd examples/state && cargo check && cd ../..
-      cd examples/template_tera && cargo check && cd ../..
-      cd examples/diesel && cargo check && cd ../..
-      cd examples/r2d2 && cargo check && cd ../..
-      cd examples/tls && cargo check && cd ../..
-      cd examples/websocket-chat && cargo check && cd ../..
-      cd examples/websocket && cargo check && cd ../..
-      cd examples/unix-socket && cargo check && cd ../..
-    fi
-  - |
-    if [[ "$TRAVIS_RUST_VERSION" == "nightly" && $CLIPPY ]]; then
-      cargo clippy
+    if [[ "$TRAVIS_RUST_VERSION" == "stable" || "$TRAVIS_RUST_VERSION" == "beta" ]]; then
+      cargo test --all-features --all -- --nocapture
+      cd actix-http; cargo test --no-default-features --features="rustls" -- --nocapture; cd ..
+      cd awc; cargo test --no-default-features --features="rustls" -- --nocapture; cd ..
     fi
 # Upload docs
 after_success:
   - |
-    if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "beta" ]]; then
-      cargo doc --features "alpn, tls, session" --no-deps &&
+    if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "stable" ]]; then
+      cargo doc --no-deps --all-features &&
       echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html &&
-      curl -sL https://github.com/rust-lang-nursery/mdBook/releases/download/v0.1.5/mdbook-v0.1.5-x86_64-unknown-linux-gnu.tar.gz | tar xvz -C $HOME/.cargo/bin &&
-      cd guide && mdbook build -d ../target/doc/guide && cd .. &&
      git clone https://github.com/davisp/ghp-import.git &&
      ./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc &&
      echo "Uploaded documentation"
    fi
  - |
-    if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_RUST_VERSION" == "1.21.0" ]]; then
-      bash <(curl https://raw.githubusercontent.com/xd009642/tarpaulin/master/travis-install.sh)
-      USE_SKEPTIC=1 cargo tarpaulin --out Xml
-      bash <(curl -s https://codecov.io/bash)
-      echo "Uploaded code coverage"
+    if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-11-20" ]]; then
+      taskset -c 0 cargo tarpaulin --out Xml --all --all-features
+      bash <(curl -s https://codecov.io/bash)
+      echo "Uploaded code coverage"
    fi

CHANGES.md (modified)

@@ -1,257 +1,357 @@
 # Changes
-## 0.5.0
-* Type-safe path/query/form parameter handling, using serde #70
-* HttpResponse builder's methods `.body()`, `.finish()`, `.json()`
-  return `HttpResponse` instead of `Result`
-* Use more ergonomic `actix_web::Error` instead of `http::Error` for `ClientRequestBuilder::body()`
-* Added `HttpRequest::resource()`, returns current matched resource
-* Added `ErrorHandlers` middleware
-* Router cannot parse Non-ASCII characters in URL #137
-* Fix long client urls #129
-* Fix panic on invalid URL characters #130
-* Fix client connection pooling
-* Fix logger request duration calculation #152
-## 0.4.10 (2018-03-20)
-* Use `Error` instead of `InternalError` for `error::ErrorXXXX` methods
-* Allow to set client request timeout
-* Allow to set client websocket handshake timeout
-* Refactor `TestServer` configuration
-* Fix server websockets big payloads support
-* Fix http/2 date header generation
-## 0.4.9 (2018-03-16)
-* Allow to disable http/2 support
-* Wake payload reading task when data is available
-* Fix server keep-alive handling
-* Send Query Parameters in client requests #120
-* Move brotli encoding to a feature
-* Add option of default handler for `StaticFiles` handler #57
-* Add basic client connection pooling
-## 0.4.8 (2018-03-12)
-* Allow to set read buffer capacity for server request
-* Handle WouldBlock error for socket accept call
-## 0.4.7 (2018-03-11)
-* Fix panic on unknown content encoding
-* Fix connection get closed too early
-* Fix streaming response handling for http/2
-* Better sleep on error support
-## 0.4.6 (2018-03-10)
-* Fix client cookie handling
-* Fix json content type detection
-* Fix CORS middleware #117
-* Optimize websockets stream support
-## 0.4.5 (2018-03-07)
-* Fix compression #103 and #104
-* Fix client cookie handling #111
-* Non-blocking processing of a `NamedFile`
-* Enable compression support for `NamedFile`
-* Better support for `NamedFile` type
-* Add `ResponseError` impl for `SendRequestError`. This improves ergonomics of the client.
-* Add native-tls support for client
-* Allow client connection timeout to be set #108
-* Allow to use std::net::TcpListener for HttpServer
-* Handle panics in worker threads
-## 0.4.4 (2018-03-04)
-* Allow to use Arc<Vec<u8>> as response/request body
-* Fix handling of requests with an encoded body with a length > 8192 #93
-## 0.4.3 (2018-03-03)
-* Fix request body read bug
-* Fix segmentation fault #79
-* Set reuse address before bind #90
-## 0.4.2 (2018-03-02)
-* Better naming for websockets implementation
-* Add `Pattern::with_prefix()`, make it more usable outside of actix
-* Add csrf middleware for filter for cross-site request forgery #89
-* Fix disconnect on idle connections
-## 0.4.1 (2018-03-01)
-* Rename `Route::p()` to `Route::filter()`
-* Better naming for http codes
-* Fix payload parse in situation when socket data is not ready.
-* Fix Session mutable borrow lifetime #87
-## 0.4.0 (2018-02-28)
-* Actix 0.5 compatibility
-* Fix request json/urlencoded loaders
-* Simplify HttpServer type definition
-* Added HttpRequest::encoding() method
-* Added HttpRequest::mime_type() method
-* Added HttpRequest::uri_mut(), allows to modify request uri
-* Added StaticFiles::index_file()
-* Added http client
-* Added websocket client
-* Added TestServer::ws(), test websockets client
-* Added TestServer http client support
-* Allow to override content encoding on application level
-## 0.3.3 (2018-01-25)
-* Stop processing any events after context stop
-* Re-enable write back-pressure for h1 connections
-* Refactor HttpServer::start_ssl() method
-* Upgrade openssl to 0.10
-## 0.3.2 (2018-01-21)
-* Fix HEAD requests handling
-* Log request processing errors
-* Always enable content encoding if encoding explicitly selected
-* Allow multiple Applications on a single server with different state #49
-* CORS middleware: allowed_headers is defaulting to None #50
-## 0.3.1 (2018-01-13)
-* Fix directory entry path #47
-* Do not enable chunked encoding for HTTP/1.0
-* Allow explicitly disable chunked encoding
-## 0.3.0 (2018-01-12)
-* HTTP/2 Support
-* Refactor streaming responses
-* Refactor error handling
-* Asynchronous middlewares
-* Refactor logger middleware
-* Content compression/decompression (br, gzip, deflate)
-* Server multi-threading
-* Gracefull shutdown support
-## 0.2.1 (2017-11-03)
-* Allow to start tls server with `HttpServer::serve_tls`
-* Export `Frame` enum
-* Add conversion impl from `HttpResponse` and `BinaryBody` to a `Frame`
-## 0.2.0 (2017-10-30)
-* Do not use `http::Uri` as it can not parse some valid paths
-* Refactor response `Body`
-* Refactor `RouteRecognizer` usability
-* Refactor `HttpContext::write`
-* Refactor `Payload` stream
-* Re-use `BinaryBody` for `Frame::Payload`
-* Stop http actor on `write_eof`
-* Fix disconnection handling.
-## 0.1.0 (2017-10-23)
-* First release
+## [2.0.0-alpha.1] - 2019-11-22
+### Changed
+* Migrated to `std::future`
+* Remove implementation of `Responder` for `()`. (#1167)
+## [1.0.9] - 2019-11-14
+### Added
+* Add `Payload::into_inner` method and make stored `def::Payload` public. (#1110)
+### Changed
+* Support `Host` guards when the `Host` header is unset (e.g. HTTP/2 requests) (#1129)
+## [1.0.8] - 2019-09-25
+### Added
+* Add `Scope::register_data` and `Resource::register_data` methods, parallel to
+  `App::register_data`.
+* Add `middleware::Condition` that conditionally enables another middleware
+* Allow to re-construct `ServiceRequest` from `HttpRequest` and `Payload`
+* Add `HttpServer::listen_uds` for ability to listen on UDS FD rather than path,
+  which is useful for example with systemd.
+### Changed
+* Make UrlEncodedError::Overflow more informativve
+* Use actix-testing for testing utils
+## [1.0.7] - 2019-08-29
+### Fixed
+* Request Extensions leak #1062
+## [1.0.6] - 2019-08-28
+### Added
+* Re-implement Host predicate (#989)
+* Form immplements Responder, returning a `application/x-www-form-urlencoded` response
+* Add `into_inner` to `Data`
+* Add `test::TestRequest::set_form()` convenience method to automatically serialize data and set
+  the header in test requests.
+### Changed
+* `Query` payload made `pub`. Allows user to pattern-match the payload.
+* Enable `rust-tls` feature for client #1045
+* Update serde_urlencoded to 0.6.1
+* Update url to 2.1
+## [1.0.5] - 2019-07-18
+### Added
+* Unix domain sockets (HttpServer::bind_uds) #92
+* Actix now logs errors resulting in "internal server error" responses always, with the `error`
+  logging level
+### Fixed
+* Restored logging of errors through the `Logger` middleware
+## [1.0.4] - 2019-07-17
+### Added
+* Add `Responder` impl for `(T, StatusCode) where T: Responder`
+* Allow to access app's resource map via
+  `ServiceRequest::resource_map()` and `HttpRequest::resource_map()` methods.
+### Changed
+* Upgrade `rand` dependency version to 0.7
+## [1.0.3] - 2019-06-28
+### Added
+* Support asynchronous data factories #850
+### Changed
+* Use `encoding_rs` crate instead of unmaintained `encoding` crate
+## [1.0.2] - 2019-06-17
+### Changed
+* Move cors middleware to `actix-cors` crate.
+* Move identity middleware to `actix-identity` crate.
+## [1.0.1] - 2019-06-17
+### Added
+* Add support for PathConfig #903
+* Add `middleware::identity::RequestIdentity` trait to `get_identity` from `HttpMessage`.
+### Changed
+* Move cors middleware to `actix-cors` crate.
+* Move identity middleware to `actix-identity` crate.
+* Disable default feature `secure-cookies`.
+* Allow to test an app that uses async actors #897
+* Re-apply patch from #637 #894
+### Fixed
+* HttpRequest::url_for is broken with nested scopes #915
+## [1.0.0] - 2019-06-05
+### Added
+* Add `Scope::configure()` method.
+* Add `ServiceRequest::set_payload()` method.
+* Add `test::TestRequest::set_json()` convenience method to automatically
+  serialize data and set header in test requests.
+* Add macros for head, options, trace, connect and patch http methods
+### Changed
+* Drop an unnecessary `Option<_>` indirection around `ServerBuilder` from `HttpServer`. #863
+### Fixed
+* Fix Logger request time format, and use rfc3339. #867
+* Clear http requests pool on app service drop #860
+## [1.0.0-rc] - 2019-05-18
+### Add
+* Add `Query<T>::from_query()` to extract parameters from a query string. #846
+* `QueryConfig`, similar to `JsonConfig` for customizing error handling of query extractors.
+### Changed
+* `JsonConfig` is now `Send + Sync`, this implies that `error_handler` must be `Send + Sync` too.
+### Fixed
+* Codegen with parameters in the path only resolves the first registered endpoint #841
+## [1.0.0-beta.4] - 2019-05-12
+### Add
+* Allow to set/override app data on scope level
+### Changed
+* `App::configure` take an `FnOnce` instead of `Fn`
+* Upgrade actix-net crates
+## [1.0.0-beta.3] - 2019-05-04
+### Added
+* Add helper function for executing futures `test::block_fn()`
+### Changed
+* Extractor configuration could be registered with `App::data()`
+  or with `Resource::data()` #775
+* Route data is unified with app data, `Route::data()` moved to resource
+  level to `Resource::data()`
+* CORS handling without headers #702
+* Allow to construct `Data` instances to avoid double `Arc` for `Send + Sync` types.
+### Fixed
+* Fix `NormalizePath` middleware impl #806
+### Deleted
+* `App::data_factory()` is deleted.
+## [1.0.0-beta.2] - 2019-04-24
+### Added
+* Add raw services support via `web::service()`
+* Add helper functions for reading response body `test::read_body()`
+* Add support for `remainder match` (i.e "/path/{tail}*")
+* Extend `Responder` trait, allow to override status code and headers.
+* Store visit and login timestamp in the identity cookie #502
+### Changed
+* `.to_async()` handler can return `Responder` type #792
+### Fixed
+* Fix async web::Data factory handling
+## [1.0.0-beta.1] - 2019-04-20
+### Added
+* Add helper functions for reading test response body,
+  `test::read_response()` and test::read_response_json()`
+* Add `.peer_addr()` #744
+* Add `NormalizePath` middleware
+### Changed
+* Rename `RouterConfig` to `ServiceConfig`
+* Rename `test::call_success` to `test::call_service`
+* Removed `ServiceRequest::from_parts()` as it is unsafe to create from parts.
+* `CookieIdentityPolicy::max_age()` accepts value in seconds
+### Fixed
+* Fixed `TestRequest::app_data()`
+## [1.0.0-alpha.6] - 2019-04-14
+### Changed
+* Allow to use any service as default service.
+* Remove generic type for request payload, always use default.
+* Removed `Decompress` middleware. Bytes, String, Json, Form extractors
+  automatically decompress payload.
+* Make extractor config type explicit. Add `FromRequest::Config` associated type.
+## [1.0.0-alpha.5] - 2019-04-12
+### Added
+* Added async io `TestBuffer` for testing.
+### Deleted
+* Removed native-tls support
+## [1.0.0-alpha.4] - 2019-04-08
+### Added
+* `App::configure()` allow to offload app configuration to different methods
+* Added `URLPath` option for logger
+* Added `ServiceRequest::app_data()`, returns `Data<T>`
+* Added `ServiceFromRequest::app_data()`, returns `Data<T>`
+### Changed
+* `FromRequest` trait refactoring
+* Move multipart support to actix-multipart crate
+### Fixed
+* Fix body propagation in Response::from_error. #760
+## [1.0.0-alpha.3] - 2019-04-02
+### Changed
+* Renamed `TestRequest::to_service()` to `TestRequest::to_srv_request()`
+* Renamed `TestRequest::to_response()` to `TestRequest::to_srv_response()`
+* Removed `Deref` impls
+### Removed
+* Removed unused `actix_web::web::md()`
+## [1.0.0-alpha.2] - 2019-03-29
+### Added
+* rustls support
+### Changed
+* use forked cookie
+* multipart::Field renamed to MultipartField
+## [1.0.0-alpha.1] - 2019-03-28
+### Changed
+* Complete architecture re-design.
+* Return 405 response if no matching route found within resource #538

Cargo.toml (modified)

@@ -1,130 +1,142 @@
 [package]
 name = "actix-web"
-version = "0.5.0-dev"
+version = "2.0.0-alpha.1"
 authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
-description = "Actix web is a simple, pragmatic, extremely fast, web framework for Rust."
+description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust."
 readme = "README.md"
-keywords = ["http", "web", "framework", "async", "futures"]
+keywords = ["actix", "http", "web", "framework", "async"]
-homepage = "https://github.com/actix/actix-web"
+homepage = "https://actix.rs"
 repository = "https://github.com/actix/actix-web.git"
 documentation = "https://docs.rs/actix-web/"
 categories = ["network-programming", "asynchronous",
               "web-programming::http-server",
+              "web-programming::http-client",
               "web-programming::websocket"]
 license = "MIT/Apache-2.0"
-exclude = [".gitignore", ".travis.yml", ".cargo/config",
-           "appveyor.yml", "/examples/**"]
-build = "build.rs"
+exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
+edition = "2018"
+[package.metadata.docs.rs]
+features = ["openssl", "brotli", "flate2-zlib", "secure-cookies", "client"]
 [badges]
 travis-ci = { repository = "actix/actix-web", branch = "master" }
-appveyor = { repository = "fafhrd91/actix-web-hdy9d" }
 codecov = { repository = "actix/actix-web", branch = "master", service = "github" }
 [lib]
 name = "actix_web"
 path = "src/lib.rs"
-[features]
-default = ["session", "brotli"]
+[workspace]
+members = [
+  ".",
+  "awc",
+  "actix-http",
+  "actix-cors",
+  "actix-files",
+  "actix-framed",
+  "actix-session",
+  "actix-identity",
+  "actix-multipart",
+  "actix-web-actors",
+  "actix-web-codegen",
+  "test-server",
+]
-# tls
-tls = ["native-tls", "tokio-tls"]
+[features]
+default = ["brotli", "flate2-zlib", "client", "fail"]
+# http client
+client = ["awc"]
+# brotli encoding, requires c compiler
+brotli = ["actix-http/brotli"]
+# miniz-sys backend for flate2 crate
+flate2-zlib = ["actix-http/flate2-zlib"]
+# rust backend for flate2 crate
+flate2-rust = ["actix-http/flate2-rust"]
+# sessions feature, session require "ring" crate and c compiler
+secure-cookies = ["actix-http/secure-cookies"]
+fail = ["actix-http/fail"]
 # openssl
-alpn = ["openssl", "openssl/v102", "openssl/v110", "tokio-openssl"]
+openssl = ["open-ssl", "actix-server/openssl", "awc/openssl"]
-# sessions
-session = ["cookie/secure"]
+# rustls
+# rustls = ["rust-tls", "actix-server/rustls", "awc/rustls"]
-# brotli encoding
-brotli = ["brotli2"]
 [dependencies]
-actix = "^0.5.5"
+actix-codec = "0.2.0-alpha.1"
+actix-service = "1.0.0-alpha.1"
+actix-utils = "0.5.0-alpha.1"
+actix-router = "0.1.5"
+actix-rt = "1.0.0-alpha.1"
+actix-web-codegen = "0.2.0-alpha.1"
+actix-http = "0.3.0-alpha.1"
+actix-server = "0.8.0-alpha.1"
+actix-server-config = "0.3.0-alpha.1"
+actix-testing = "0.3.0-alpha.1"
+actix-threadpool = "0.2.0-alpha.1"
+awc = { version = "0.3.0-alpha.1", optional = true }
-base64 = "0.9"
-bitflags = "1.0"
-failure = "0.1.1"
-flate2 = "1.0"
-h2 = "0.1"
-http = "^0.1.5"
-httparse = "1.2"
-http-range = "0.1"
-libc = "0.2"
+bytes = "0.4"
+derive_more = "0.99.2"
+encoding_rs = "0.8"
+futures = "0.3.1"
+hashbrown = "0.6.3"
 log = "0.4"
 mime = "0.3"
-mime_guess = "2.0.0-alpha"
-num_cpus = "1.0"
-percent-encoding = "1.0"
-rand = "0.4"
-regex = "0.2"
-serde = "1.0"
+net2 = "0.2.33"
+parking_lot = "0.9"
+pin-project = "0.4.5"
+regex = "1.0"
+serde = { version = "1.0", features=["derive"] }
 serde_json = "1.0"
-serde_urlencoded = "0.5"
-sha1 = "0.6"
-smallvec = "0.6"
-time = "0.1"
-encoding = "0.2"
-language-tags = "0.2"
-lazy_static = "1.0"
-url = { version="1.7", features=["query_encoding"] }
-cookie = { version="0.10", features=["percent-encode"] }
-brotli2 = { version="^0.3.2", optional = true }
+serde_urlencoded = "0.6.1"
+time = "0.1.42"
+url = "2.1"
-# io
-mio = "^0.6.13"
-net2 = "0.2"
-bytes = "0.4"
-byteorder = "1"
-futures = "0.1"
-futures-cpupool = "0.1"
-tokio-io = "0.1"
-tokio-core = "0.1"
-trust-dns-resolver = "0.8"
+# ssl support
+open-ssl = { version="0.10", package="openssl", optional = true }
+# rust-tls = { version = "0.16", package="rustls", optional = true }
-# native-tls
-native-tls = { version="0.1", optional = true }
-tokio-tls = { version="0.1", optional = true }
-# openssl
-openssl = { version="0.10", optional = true }
-tokio-openssl = { version="0.2", optional = true }
 [dev-dependencies]
-env_logger = "0.5"
-skeptic = "0.13"
+# actix = "0.8.3"
+actix-connect = "0.3.0-alpha.1"
+actix-http-test = "0.3.0-alpha.1"
+rand = "0.7"
+env_logger = "0.6"
 serde_derive = "1.0"
+brotli2 = "0.3.2"
-[build-dependencies]
+flate2 = "1.0.2"
-skeptic = "0.13"
-version_check = "0.1"
 [profile.release]
 lto = true
 opt-level = 3
 codegen-units = 1
-[workspace]
-members = [
-  "./",
-  "examples/basics",
-  "examples/juniper",
-  "examples/diesel",
-  "examples/r2d2",
-  "examples/json",
-  "examples/protobuf",
-  "examples/hello-world",
-  "examples/http-proxy",
-  "examples/multipart",
-  "examples/state",
-  "examples/redis-session",
-  "examples/template_tera",
-  "examples/tls",
-  "examples/websocket",
-  "examples/websocket-chat",
-  "examples/web-cors/backend",
-  "examples/unix-socket",
-  "tools/wsload/",
-]
+[patch.crates-io]
+actix-web = { path = "." }
+actix-http = { path = "actix-http" }
+actix-http-test = { path = "test-server" }
+actix-web-codegen = { path = "actix-web-codegen" }
+# actix-web-actors = { path = "actix-web-actors" }
+actix-cors = { path = "actix-cors" }
+actix-identity = { path = "actix-identity" }
+actix-session = { path = "actix-session" }
+actix-files = { path = "actix-files" }
+actix-multipart = { path = "actix-multipart" }
+awc = { path = "awc" }
+actix-codec = { git = "https://github.com/actix/actix-net.git" }
+actix-connect = { git = "https://github.com/actix/actix-net.git" }
+actix-rt = { git = "https://github.com/actix/actix-net.git" }
+actix-macros = { git = "https://github.com/actix/actix-net.git" }
+actix-server = { git = "https://github.com/actix/actix-net.git" }
+actix-server-config = { git = "https://github.com/actix/actix-net.git" }
+actix-service = { git = "https://github.com/actix/actix-net.git" }
+actix-testing = { git = "https://github.com/actix/actix-net.git" }
+actix-utils = { git = "https://github.com/actix/actix-net.git" }

MIGRATION.md (new file)

@@ -0,0 +1,556 @@
## 2.0.0
* Sync handlers have been removed and the `.to_async()` method has been renamed to `.to()`.
  Replace `fn` with `async fn` to convert a sync handler to an async one (see the sketch below).
* `TestServer::new()` renamed to `TestServer::start()`
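A minimal sketch of the 2.0 style, using the runtime macro shown in the README; the route pattern and handler body are illustrative, not taken from the project:
```rust
use actix_web::{web, App, HttpServer, Responder};

// 2.0: handlers are `async fn` and are registered with `.to()` (formerly `.to_async()`).
async fn index(info: web::Path<(u32, String)>) -> impl Responder {
    format!("Hello {}! id:{}", info.1, info.0)
}

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().service(web::resource("/{id}/{name}").route(web::get().to(index)))
    })
    .bind("127.0.0.1:8080")?
    .start()
    .await
}
```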
## 1.0.1
* Cors middleware has been moved to `actix-cors` crate
instead of
```rust
use actix_web::middleware::cors::Cors;
```
use
```rust
use actix_cors::Cors;
```
* Identity middleware has been moved to `actix-identity` crate
instead of
```rust
use actix_web::middleware::identity::{Identity, CookieIdentityPolicy, IdentityService};
```
use
```rust
use actix_identity::{Identity, CookieIdentityPolicy, IdentityService};
```
## 1.0.0
* Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration
instead of
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Config = ExtractorConfig;
type Result = Result<YourExtractor, Error>;
fn from_request(req: &HttpRequest, cfg: &Self::Config) -> Self::Result {
println!("use the config: {:?}", cfg.config);
...
}
}
App::new().resource("/route_with_config", |r| {
r.post().with_config(handler_fn, |cfg| {
cfg.0.config = "test".to_string();
})
})
```
use the HttpRequest to get the configuration like any other `Data` with `req.app_data::<C>()` and set it with the `data()` method on the `resource`
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Error = Error;
type Future = Result<Self, Self::Error>;
type Config = ExtractorConfig;
fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
let cfg = req.app_data::<ExtractorConfig>();
println!("config data?: {:?}", cfg.unwrap().role);
...
}
}
App::new().service(
resource("/route_with_config")
.data(ExtractorConfig {
config: "test".to_string(),
})
.route(post().to(handler_fn)),
)
```
* Resource registration. 1.0 version uses generalized resource
registration via `.service()` method.
instead of
```rust
App.new().resource("/welcome", |r| r.f(welcome))
```
use App's or Scope's `.service()` method. `.service()` method accepts
object that implements `HttpServiceFactory` trait. By default
actix-web provides `Resource` and `Scope` services.
```rust
App::new().service(
    web::resource("/welcome")
        .route(web::get().to(welcome))
        .route(web::post().to(post_handler)),
);
```
* Scope registration.
instead of
```rust
let app = App::new().scope("/{project_id}", |scope| {
scope
.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path2", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path3", |r| r.f(|_| HttpResponse::MethodNotAllowed()))
});
```
use `.service()` for registration and `web::scope()` as scope object factory.
```rust
let app = App::new().service(
web::scope("/{project_id}")
.service(web::resource("/path1").to(|| HttpResponse::Ok()))
.service(web::resource("/path2").to(|| HttpResponse::Ok()))
.service(web::resource("/path3").to(|| HttpResponse::MethodNotAllowed()))
);
```
* `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
instead of
```rust
App::new().resource("/welcome", |r| r.with(welcome))
```
use `.to()` or `.to_async()` methods
```rust
App::new().service(web::resource("/welcome").to(welcome))
```
* Passing arguments to handler with extractors, multiple arguments are allowed
instead of
```rust
fn welcome((body, req): (Bytes, HttpRequest)) -> ... {
...
}
```
use multiple arguments
```rust
fn welcome(body: Bytes, req: HttpRequest) -> ... {
...
}
```
* `.f()`, `.a()` and `.h()` handler registration methods have been removed.
Use `.to()` for handlers and `.to_async()` for async handlers. Handler function
must use extractors.
instead of
```rust
App::new().resource("/welcome", |r| r.f(welcome))
```
use App's `to()` or `to_async()` methods
```rust
App::new().service(web::resource("/welcome").to(welcome))
```
* `HttpRequest` does not provide access to request's payload stream.
instead of
```rust
fn index(req: &HttpRequest) -> Box<Future<Item=HttpResponse, Error=Error>> {
req
.payload()
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
.responder()
}
```
use `Payload` extractor
```rust
fn index(stream: web::Payload) -> impl Future<Item=HttpResponse, Error=Error> {
stream
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
}
```
* `State` is now `Data`. You register Data during the App initialization process
and then access it from handlers either using a Data extractor or using
HttpRequest's api.
instead of
```rust
App::with_state(T)
```
use App's `data` method
```rust
App::new()
    .data(T)
```
and either use the Data extractor within your handler
```rust
use actix_web::web::Data;
fn endpoint_handler(data: Data<T>) {
...
}
```
or access your `Data` element from the `HttpRequest` (a fuller runnable sketch of registering and extracting data follows below):
```rust
fn endpoint_handler(req: HttpRequest) {
let data: Option<Data<T>> = req.app_data::<T>();
}
```
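A minimal runnable sketch of the 1.0 pattern, assuming a hypothetical `AppState` type; it registers the state with `.data()` and reads it through the `Data` extractor:
```rust
use actix_web::{web, App, HttpServer, Responder};

// Hypothetical application state; any type can be stored this way.
struct AppState {
    app_name: String,
}

// The `Data` extractor hands the registered state to the handler.
fn index(data: web::Data<AppState>) -> impl Responder {
    format!("Hello from {}", data.app_name)
}

fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // register the state during App initialization
            .data(AppState { app_name: "demo".to_string() })
            .route("/", web::get().to(index))
    })
    .bind("127.0.0.1:8080")?
    .run()
}
```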
* AsyncResponder is removed, use `.to_async()` registration method and `impl Future<>` as result type.
instead of
```rust
use actix_web::AsyncResponder;
fn endpoint_handler(...) -> impl Future<Item=HttpResponse, Error=Error>{
...
.responder()
}
```
Simply omit `AsyncResponder` and the corresponding `.responder()` finishing call, as in the sketch below.
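A minimal 1.0-style sketch (futures 0.1), with an illustrative handler body:
```rust
use actix_web::{Error, HttpResponse};
use futures::future::{ok, Future};

// 1.0: return the future directly; no `AsyncResponder` / `.responder()` needed.
// Register such a handler with `.to_async()`.
fn endpoint_handler() -> impl Future<Item = HttpResponse, Error = Error> {
    ok(HttpResponse::Ok().finish())
}
```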
* Middleware
instead of
```rust
let app = App::new()
.middleware(middleware::Logger::default())
```
use `.wrap()` method
```rust
let app = App::new()
.wrap(middleware::Logger::default())
.route("/index.html", web::get().to(index));
```
* `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
methods have been removed. Use the `Bytes`, `String`, `Form`, `Json`, `Multipart` extractors instead.
instead of
```rust
fn index(req: &HttpRequest) -> Responder {
req.body()
.and_then(|body| {
...
})
}
```
use
```rust
fn index(body: Bytes) -> Responder {
...
}
```
* The `actix_web::server` module has been removed. To start an http server, use the `actix_web::HttpServer` type.
* `StaticFiles` and `NamedFile` have been moved to a separate crate.
  instead of `use actix_web::fs::StaticFiles`
  use `use actix_files::Files`
  instead of `use actix_web::fs::NamedFile`
  use `use actix_files::NamedFile`
* `Multipart` has been moved to a separate crate.
  instead of `use actix_web::multipart::Multipart`
  use `use actix_multipart::Multipart`
* Response compression is not enabled by default.
  To enable it, use the `Compress` middleware: `App::new().wrap(Compress::default())` (see the sketch below).
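A minimal sketch of enabling compression app-wide in 1.0, with an illustrative handler:
```rust
use actix_web::{middleware::Compress, web, App, HttpServer};

fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // compression is opt-in now; wrap the whole app
            .wrap(Compress::default())
            .route("/", web::get().to(|| "hello"))
    })
    .bind("127.0.0.1:8080")?
    .run()
}
```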
* Session middleware moved to actix-session crate
* Actors support have been moved to `actix-web-actors` crate
* Custom Error
  Instead of the single `error_response` method, `ResponseError` now provides two methods: `error_response` and `render_response`. `error_response` creates the error response and `render_response` returns the error response to the caller.
  The simplest migration from 0.7 to 1.0 is to add the following method to your custom `ResponseError` implementation:
```rust
fn render_response(&self) -> HttpResponse {
self.error_response()
}
```
## 0.7.15
* The `' '` character is not percent decoded anymore before matching routes. If you need to use it in
your routes, you should use `%20`.
instead of
```rust
fn main() {
let app = App::new().resource("/my index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/my%20index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
* If you used `AsyncResult::async` you need to replace it with `AsyncResult::future`
## 0.7.4
* `Route::with_config()`/`Route::with_async_config()` always pass the configuration objects as a tuple,
  even for handlers with a single parameter.
## 0.7
* `HttpRequest` does not implement `Stream` anymore. If you need to read request payload
use `HttpMessage::payload()` method.
instead of
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.from_err()
.fold(...)
....
}
```
use `.payload()`
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.payload() // <- get request payload stream
.from_err()
.fold(...)
....
}
```
* [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html)
trait uses `&HttpRequest` instead of `&mut HttpRequest`.
* Removed `Route::with2()` and `Route::with3()`; use a tuple of extractors instead.
  instead of
  ```rust
  fn index(query: Query<..>, info: Json<MyStruct>) -> impl Responder {}
  ```
  use a tuple of extractors and use `.with()` for registration:
  ```rust
  fn index((query, json): (Query<..>, Json<MyStruct>)) -> impl Responder {}
  ```
* `Handler::handle()` uses `&self` instead of `&mut self`
* `Handler::handle()` accepts reference to `HttpRequest<_>` instead of value
* Removed deprecated `HttpServer::threads()`, use
[HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead.
* Renamed `client::ClientConnectorError::Connector` to
`client::ClientConnectorError::Resolver`
* `Route::with()` no longer returns `ExtractorConfig`; to configure
  an extractor, use `Route::with_config()`
instead of
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with(index)
.limit(4096); // <- limit size of the payload
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with_config(index, |cfg| { // <- register handler
cfg.limit(4096); // <- limit size of the payload
})
});
}
```
* `Route::with_async()` no longer returns `ExtractorConfig`; to configure
  an extractor, use `Route::with_async_config()`
## 0.6
* The `Path<T>` extractor returns `ErrorNotFound` on failure instead of `ErrorBadRequest`
* `ws::Message::Close` now includes optional close reason.
`ws::CloseCode::Status` and `ws::CloseCode::Empty` have been removed.
* `HttpServer::threads()` renamed to `HttpServer::workers()`.
* `HttpServer::start_ssl()` and `HttpServer::start_tls()` deprecated.
Use `HttpServer::bind_ssl()` and `HttpServer::bind_tls()` instead.
* `HttpRequest::extensions()` returns a read-only reference to the request's extensions;
  `HttpRequest::extensions_mut()` returns a mutable reference.
* Instead of
  `use actix_web::middleware::{CookieSessionBackend, CookieSessionError, RequestSession,
  Session, SessionBackend, SessionImpl, SessionStorage};`
  use the `actix_web::middleware::session` module:
  `use actix_web::middleware::session::{CookieSessionBackend, CookieSessionError,
  RequestSession, Session, SessionBackend, SessionImpl, SessionStorage};`
* `FromRequest::from_request()` accepts mutable reference to a request
* `FromRequest::Result` has to implement `Into<Reply<Self>>`
* [`Responder::respond_to()`](
https://actix.rs/actix-web/actix_web/trait.Responder.html#tymethod.respond_to)
is generic over `S`
* Use the `Query` extractor instead of `HttpRequest::query()`.
```rust
fn index(q: Query<HashMap<String, String>>) -> Result<..> {
...
}
```
or
```rust
let q = Query::<HashMap<String, String>>::extract(req);
```
* Websocket write operations are provided by the `WsWriter` trait;
  bring it into scope with `use actix_web::ws::WsWriter;`.
## 0.5
* `HttpResponseBuilder::body()`, `.finish()`, `.json()`
methods return `HttpResponse` instead of `Result<HttpResponse>`
* `actix_web::Method`, `actix_web::StatusCode`, `actix_web::Version`
moved to `actix_web::http` module
* `actix_web::header` moved to `actix_web::http::header`
* `NormalizePath` moved to `actix_web::http` module
* `HttpServer` moved to `actix_web::server`, added new `actix_web::server::new()` function,
shortcut for `actix_web::server::HttpServer::new()`
* `DefaultHeaders` middleware does not use separate builder, all builder methods moved to type itself
* `StaticFiles::new()`'s show_index parameter removed, use `show_files_listing()` method instead.
* `CookieSessionBackendBuilder` removed, all methods moved to `CookieSessionBackend` type
* `actix_web::httpcodes` module is deprecated, `HttpResponse::Ok()`, `HttpResponse::Found()` and other `HttpResponse::XXX()`
functions should be used instead
* `ClientRequestBuilder::body()` returns `Result<_, actix_web::Error>`
instead of `Result<_, http::Error>`
* `Application` renamed to `App`
* `actix_web::Reply`, `actix_web::Resource` moved to `actix_web::dev`
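For illustration, a sketch of the corresponding 0.5-era imports implied by the notes above; the exact item list is an assumption:
```rust
// 0.5: these items now live under `actix_web::http` and `actix_web::server`.
use actix_web::http::{header, Method, NormalizePath, StatusCode, Version};
use actix_web::server::HttpServer;
use actix_web::App; // `Application` was renamed to `App`

fn main() {
    let _ = (Method::GET, StatusCode::OK, Version::HTTP_11);
}
```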

Makefile (deleted)

@@ -1,26 +0,0 @@
.PHONY: default build test doc book clean
CARGO_FLAGS := --features "$(FEATURES) alpn"
default: test
build:
cargo build $(CARGO_FLAGS)
test: build clippy
cargo test $(CARGO_FLAGS)
skeptic:
USE_SKEPTIC=1 cargo test $(CARGO_FLAGS)
# cd examples/word-count && python setup.py install && pytest -v tests
clippy:
if $$CLIPPY; then cargo clippy $(CARGO_FLAGS); fi
doc: build
cargo doc --no-deps $(CARGO_FLAGS)
cd guide; mdbook build -d ../target/doc/guide/; cd ..
book:
cd guide; mdbook build -d ../target/doc/guide/; cd ..

README.md (modified)

@@ -1,74 +1,67 @@
-# Actix web [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![Build status](https://ci.appveyor.com/api/projects/status/kkdb4yce7qhm5w85/branch/master?svg=true)](https://ci.appveyor.com/project/fafhrd91/actix-web-hdy9d/branch/master) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-web)](https://crates.io/crates/actix-web) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+# Actix web [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-web)](https://crates.io/crates/actix-web) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-Actix web is a simple, pragmatic, extremely fast, web framework for Rust.
+Actix web is a simple, pragmatic and extremely fast web framework for Rust.
-* Supported *HTTP/1.x* and [*HTTP/2.0*](https://actix.github.io/actix-web/guide/qs_13.html) protocols
+* Supported *HTTP/1.x* and *HTTP/2.0* protocols
 * Streaming and pipelining
 * Keep-alive and slow requests handling
-* Client/server [WebSockets](https://actix.github.io/actix-web/guide/qs_9.html) support
+* Client/server [WebSockets](https://actix.rs/docs/websockets/) support
 * Transparent content compression/decompression (br, gzip, deflate)
-* Configurable [request routing](https://actix.github.io/actix-web/guide/qs_5.html)
-* Graceful server shutdown
+* Configurable [request routing](https://actix.rs/docs/url-dispatch/)
 * Multipart streams
 * Static assets
-* SSL support with openssl or native-tls
-* Middlewares ([Logger](https://actix.github.io/actix-web/guide/qs_10.html#logging),
-  [Session](https://actix.github.io/actix-web/guide/qs_10.html#user-sessions),
-  [Redis sessions](https://github.com/actix/actix-redis),
-  [DefaultHeaders](https://actix.github.io/actix-web/guide/qs_10.html#default-headers),
-  [CORS](https://actix.github.io/actix-web/actix_web/middleware/cors/index.html),
-  [CSRF](https://actix.github.io/actix-web/actix_web/middleware/csrf/index.html))
-* Built on top of [Actix actor framework](https://github.com/actix/actix).
+* SSL support with OpenSSL or Rustls
+* Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
+* Includes an asynchronous [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
+* Supports [Actix actor framework](https://github.com/actix/actix)
-## Documentation
-* [User Guide](http://actix.github.io/actix-web/guide/)
-* [API Documentation (Development)](http://actix.github.io/actix-web/actix_web/)
-* [API Documentation (Releases)](https://docs.rs/actix-web/)
+## Documentation & community resources
+* [User Guide](https://actix.rs/docs/)
+* [API Documentation (1.0)](https://docs.rs/actix-web/)
 * [Chat on gitter](https://gitter.im/actix/actix)
 * Cargo package: [actix-web](https://crates.io/crates/actix-web)
-* Minimum supported Rust version: 1.21 or later
+* Minimum supported Rust version: 1.39 or later
 ## Example
 ```rust
-extern crate actix_web;
-use actix_web::{App, HttpServer, Path};
-fn index(info: Path<(String, u32)>) -> String {
-    format!("Hello {}! id:{}", info.0, info.1)
+use actix_web::{get, web, App, HttpServer, Responder};
+
+#[get("/{id}/{name}/index.html")]
+async fn index(info: web::Path<(u32, String)>) -> impl Responder {
+    format!("Hello {}! id:{}", info.1, info.0)
 }
-fn main() {
-    HttpServer::new(
-        || App::new()
-            .resource("/{name}/{id}/index.html", |r| r.with(index)))
-        .bind("127.0.0.1:8080").unwrap()
-        .run();
+#[actix_rt::main]
+async fn main() -> std::io::Result<()> {
+    HttpServer::new(|| App::new().service(index))
+        .bind("127.0.0.1:8080")?
+        .start()
+        .await
 }
 ```
 ### More examples
-* [Basics](https://github.com/actix/actix-web/tree/master/examples/basics/)
+* [Basics](https://github.com/actix/examples/tree/master/basics/)
-* [Stateful](https://github.com/actix/actix-web/tree/master/examples/state/)
+* [Stateful](https://github.com/actix/examples/tree/master/state/)
-* [Protobuf support](https://github.com/actix/actix-web/tree/master/examples/protobuf/)
+* [Multipart streams](https://github.com/actix/examples/tree/master/multipart/)
-* [Multipart streams](https://github.com/actix/actix-web/tree/master/examples/multipart/)
+* [Simple websocket](https://github.com/actix/examples/tree/master/websocket/)
-* [Simple websocket session](https://github.com/actix/actix-web/tree/master/examples/websocket/)
+* [Tera](https://github.com/actix/examples/tree/master/template_tera/) /
-* [Tera templates](https://github.com/actix/actix-web/tree/master/examples/template_tera/)
+  [Askama](https://github.com/actix/examples/tree/master/template_askama/) templates
-* [Diesel integration](https://github.com/actix/actix-web/tree/master/examples/diesel/)
+* [Diesel integration](https://github.com/actix/examples/tree/master/diesel/)
-* [SSL / HTTP/2.0](https://github.com/actix/actix-web/tree/master/examples/tls/)
+* [r2d2](https://github.com/actix/examples/tree/master/r2d2/)
-* [Tcp/Websocket chat](https://github.com/actix/actix-web/tree/master/examples/websocket-chat/)
+* [SSL / HTTP/2.0](https://github.com/actix/examples/tree/master/tls/)
-* [Json](https://github.com/actix/actix-web/tree/master/examples/json/)
+* [Tcp/Websocket chat](https://github.com/actix/examples/tree/master/websocket-chat/)
+* [Json](https://github.com/actix/examples/tree/master/json/)
 You may consider checking out
-[this directory](https://github.com/actix/actix-web/tree/master/examples) for more examples.
+[this directory](https://github.com/actix/examples/tree/master/) for more examples.
 ## Benchmarks
-* [TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r15&hw=ph&test=plaintext)
+* [TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r18)
-* Some basic benchmarks could be found in this [repository](https://github.com/fafhrd91/benchmarks).
 ## License

actix-cors/CHANGES.md (new file)

@@ -0,0 +1,9 @@
# Changes
## [0.1.1] - unreleased
* Bump `derive_more` crate version to 0.15.0
## [0.1.0] - 2019-06-15
* Move cors middleware to separate crate

actix-cors/Cargo.toml (new file)

@@ -0,0 +1,26 @@
[package]
name = "actix-cors"
version = "0.2.0-alpha.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Cross-origin resource sharing (CORS) for Actix applications."
readme = "README.md"
keywords = ["web", "framework"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-cors/"
license = "MIT/Apache-2.0"
edition = "2018"
workspace = ".."
[lib]
name = "actix_cors"
path = "src/lib.rs"
[dependencies]
actix-web = "2.0.0-alpha.1"
actix-service = "1.0.0-alpha.1"
derive_more = "0.99.2"
futures = "0.3.1"
[dev-dependencies]
actix-rt = "1.0.0-alpha.1"

actix-cors/LICENSE-APACHE (new symbolic link)

@@ -0,0 +1 @@
../LICENSE-APACHE

actix-cors/LICENSE-MIT (new symbolic link)

@@ -0,0 +1 @@
../LICENSE-MIT

actix-cors/README.md (new file)

@@ -0,0 +1,9 @@
# Cors Middleware for actix web framework [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-cors)](https://crates.io/crates/actix-cors) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
## Documentation & community resources
* [User Guide](https://actix.rs/docs/)
* [API Documentation](https://docs.rs/actix-cors/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-cors](https://crates.io/crates/actix-cors)
* Minimum supported Rust version: 1.34 or later

actix-cors/src/lib.rs (new file, 1199 lines; diff too large to display)

actix-files/CHANGES.md (new file)

@@ -0,0 +1,64 @@
# Changes
## [0.1.7] - 2019-11-06
* Add an additional `filename*` param in the `Content-Disposition` header of `actix_files::NamedFile` to be more compatible. (#1151)
## [0.1.6] - 2019-10-14
* Add option to redirect to a slash-ended path `Files` #1132
## [0.1.5] - 2019-10-08
* Bump up `mime_guess` crate version to 2.0.1
* Bump up `percent-encoding` crate version to 2.1
* Allow user defined request guards for `Files` #1113
## [0.1.4] - 2019-07-20
* Allow to disable `Content-Disposition` header #686
## [0.1.3] - 2019-06-28
* Do not set `Content-Length` header, let actix-http set it #930
## [0.1.2] - 2019-06-13
* Content-Length is 0 for NamedFile HEAD request #914
* Fix ring dependency from actix-web default features for #741
## [0.1.1] - 2019-06-01
* Static files are incorrectly served as both chunked and with length #812
## [0.1.0] - 2019-05-25
* NamedFile last-modified check always fails due to nano-seconds
in file modified date #820
## [0.1.0-beta.4] - 2019-05-12
* Update actix-web to beta.4
## [0.1.0-beta.1] - 2019-04-20
* Update actix-web to beta.1
## [0.1.0-alpha.6] - 2019-04-14
* Update actix-web to alpha6
## [0.1.0-alpha.4] - 2019-04-08
* Update actix-web to alpha4
## [0.1.0-alpha.2] - 2019-04-02
* Add default handler support
## [0.1.0-alpha.1] - 2019-03-28
* Initial impl

actix-files/Cargo.toml (new file)

@@ -0,0 +1,36 @@
[package]
name = "actix-files"
version = "0.2.0-alpha.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Static files support for actix web."
readme = "README.md"
keywords = ["actix", "http", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-files/"
categories = ["asynchronous", "web-programming::http-server"]
license = "MIT/Apache-2.0"
edition = "2018"
workspace = ".."
[lib]
name = "actix_files"
path = "src/lib.rs"
[dependencies]
actix-web = { version = "2.0.0-alpha.1", default-features = false }
actix-http = "0.3.0-alpha.1"
actix-service = "1.0.0-alpha.1"
bitflags = "1"
bytes = "0.4"
futures = "0.3.1"
derive_more = "0.99.2"
log = "0.4"
mime = "0.3"
mime_guess = "2.0.1"
percent-encoding = "2.1"
v_htmlescape = "0.4"
[dev-dependencies]
actix-rt = "1.0.0-alpha.1"
actix-web = { version = "2.0.0-alpha.1", features=["openssl"] }

actix-files/LICENSE-APACHE (new symbolic link)

@@ -0,0 +1 @@
../LICENSE-APACHE

actix-files/LICENSE-MIT (new symbolic link)

@@ -0,0 +1 @@
../LICENSE-MIT

actix-files/README.md (new file)

@@ -0,0 +1,9 @@
# Static files support for actix web [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-files)](https://crates.io/crates/actix-files) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
## Documentation & community resources
* [User Guide](https://actix.rs/docs/)
* [API Documentation](https://docs.rs/actix-files/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-files](https://crates.io/crates/actix-files)
* Minimum supported Rust version: 1.33 or later

actix-files/src/error.rs (new file)

@@ -0,0 +1,41 @@
use actix_web::{http::StatusCode, HttpResponse, ResponseError};
use derive_more::Display;
/// Errors which can occur when serving static files.
#[derive(Display, Debug, PartialEq)]
pub enum FilesError {
/// Path is not a directory
#[display(fmt = "Path is not a directory. Unable to serve static files")]
IsNotDirectory,
/// Cannot render directory
#[display(fmt = "Unable to render directory without index file")]
IsDirectory,
}
/// Return `NotFound` for `FilesError`
impl ResponseError for FilesError {
fn error_response(&self) -> HttpResponse {
HttpResponse::new(StatusCode::NOT_FOUND)
}
}
#[derive(Display, Debug, PartialEq)]
pub enum UriSegmentError {
/// The segment started with the wrapped invalid character.
#[display(fmt = "The segment started with the wrapped invalid character")]
BadStart(char),
/// The segment contained the wrapped invalid character.
#[display(fmt = "The segment contained the wrapped invalid character")]
BadChar(char),
/// The segment ended with the wrapped invalid character.
#[display(fmt = "The segment ended with the wrapped invalid character")]
BadEnd(char),
}
/// Return `BadRequest` for `UriSegmentError`
impl ResponseError for UriSegmentError {
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
}
}

actix-files/src/lib.rs (new file, 1421 lines; diff too large to display)

actix-files/src/named.rs (new file)

@@ -0,0 +1,455 @@
use std::fs::{File, Metadata};
use std::io;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
#[cfg(unix)]
use std::os::unix::fs::MetadataExt;
use bitflags::bitflags;
use mime;
use mime_guess::from_path;
use actix_http::body::SizedStream;
use actix_web::http::header::{
self, Charset, ContentDisposition, DispositionParam, DispositionType, ExtendedValue,
};
use actix_web::http::{ContentEncoding, StatusCode};
use actix_web::middleware::BodyEncoding;
use actix_web::{Error, HttpMessage, HttpRequest, HttpResponse, Responder};
use futures::future::{ready, Ready};
use crate::range::HttpRange;
use crate::ChunkedReadFile;
bitflags! {
pub(crate) struct Flags: u8 {
const ETAG = 0b0000_0001;
const LAST_MD = 0b0000_0010;
const CONTENT_DISPOSITION = 0b0000_0100;
}
}
impl Default for Flags {
fn default() -> Self {
Flags::all()
}
}
/// A file with an associated name.
#[derive(Debug)]
pub struct NamedFile {
path: PathBuf,
file: File,
modified: Option<SystemTime>,
pub(crate) md: Metadata,
pub(crate) flags: Flags,
pub(crate) status_code: StatusCode,
pub(crate) content_type: mime::Mime,
pub(crate) content_disposition: header::ContentDisposition,
pub(crate) encoding: Option<ContentEncoding>,
}
impl NamedFile {
/// Creates an instance from a previously opened file.
///
/// The given `path` need not exist and is only used to determine the `ContentType` and
/// `ContentDisposition` headers.
///
/// # Examples
///
/// ```rust
/// use actix_files::NamedFile;
/// use std::io::{self, Write};
/// use std::env;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut file = File::create("foo.txt")?;
/// file.write_all(b"Hello, world!")?;
/// let named_file = NamedFile::from_file(file, "bar.txt")?;
/// # std::fs::remove_file("foo.txt");
/// Ok(())
/// }
/// ```
pub fn from_file<P: AsRef<Path>>(file: File, path: P) -> io::Result<NamedFile> {
let path = path.as_ref().to_path_buf();
// Get the name of the file and use it to construct default Content-Type
// and Content-Disposition values
let (content_type, content_disposition) = {
let filename = match path.file_name() {
Some(name) => name.to_string_lossy(),
None => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Provided path has no filename",
));
}
};
let ct = from_path(&path).first_or_octet_stream();
let disposition_type = match ct.type_() {
mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline,
_ => DispositionType::Attachment,
};
let mut parameters =
vec![DispositionParam::Filename(String::from(filename.as_ref()))];
if !filename.is_ascii() {
parameters.push(DispositionParam::FilenameExt(ExtendedValue {
charset: Charset::Ext(String::from("UTF-8")),
language_tag: None,
value: filename.into_owned().into_bytes(),
}))
}
let cd = ContentDisposition {
disposition: disposition_type,
parameters: parameters,
};
(ct, cd)
};
let md = file.metadata()?;
let modified = md.modified().ok();
let encoding = None;
Ok(NamedFile {
path,
file,
content_type,
content_disposition,
md,
modified,
encoding,
status_code: StatusCode::OK,
flags: Flags::default(),
})
}
/// Attempts to open a file in read-only mode.
///
/// # Examples
///
/// ```rust
/// use actix_files::NamedFile;
///
/// let file = NamedFile::open("foo.txt");
/// ```
pub fn open<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
Self::from_file(File::open(&path)?, path)
}
/// Returns reference to the underlying `File` object.
#[inline]
pub fn file(&self) -> &File {
&self.file
}
/// Retrieve the path of this file.
///
/// # Examples
///
/// ```rust
/// # use std::io;
/// use actix_files::NamedFile;
///
/// # fn path() -> io::Result<()> {
/// let file = NamedFile::open("test.txt")?;
/// assert_eq!(file.path().as_os_str(), "foo.txt");
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn path(&self) -> &Path {
self.path.as_path()
}
/// Set response **Status Code**
pub fn set_status_code(mut self, status: StatusCode) -> Self {
self.status_code = status;
self
}
/// Set the MIME Content-Type for serving this file. By default
/// the Content-Type is inferred from the filename extension.
#[inline]
pub fn set_content_type(mut self, mime_type: mime::Mime) -> Self {
self.content_type = mime_type;
self
}
/// Set the Content-Disposition for serving this file. This allows
/// changing the inline/attachment disposition as well as the filename
/// sent to the peer. By default the disposition is `inline` for text,
/// image, and video content types, and `attachment` otherwise, and
/// the filename is taken from the path provided in the `open` method
/// after converting it to UTF-8 using
/// [to_string_lossy](https://doc.rust-lang.org/std/ffi/struct.OsStr.html#method.to_string_lossy).
#[inline]
pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self {
self.content_disposition = cd;
self.flags.insert(Flags::CONTENT_DISPOSITION);
self
}
/// Disable `Content-Disposition` header.
///
/// By default the `Content-Disposition` header is enabled.
#[inline]
pub fn disable_content_disposition(mut self) -> Self {
self.flags.remove(Flags::CONTENT_DISPOSITION);
self
}
/// Set content encoding for serving this file
#[inline]
pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self {
self.encoding = Some(enc);
self
}
#[inline]
/// Specifies whether to use ETag or not.
///
/// Default is true.
pub fn use_etag(mut self, value: bool) -> Self {
self.flags.set(Flags::ETAG, value);
self
}
#[inline]
/// Specifies whether to use Last-Modified or not.
///
/// Default is true.
pub fn use_last_modified(mut self, value: bool) -> Self {
self.flags.set(Flags::LAST_MD, value);
self
}
pub(crate) fn etag(&self) -> Option<header::EntityTag> {
// This etag format is similar to Apache's.
self.modified.as_ref().map(|mtime| {
let ino = {
#[cfg(unix)]
{
self.md.ino()
}
#[cfg(not(unix))]
{
0
}
};
let dur = mtime
.duration_since(UNIX_EPOCH)
.expect("modification time must be after epoch");
header::EntityTag::strong(format!(
"{:x}:{:x}:{:x}:{:x}",
ino,
self.md.len(),
dur.as_secs(),
dur.subsec_nanos()
))
})
}
pub(crate) fn last_modified(&self) -> Option<header::HttpDate> {
self.modified.map(|mtime| mtime.into())
}
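/// Creates an `HttpResponse` for this file, evaluating the `If-Match`,
/// `If-None-Match`, `If-Modified-Since` and `If-Unmodified-Since`
/// preconditions, honouring `Range` requests, and streaming the file as the
/// response body.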
pub fn into_response(self, req: &HttpRequest) -> Result<HttpResponse, Error> {
if self.status_code != StatusCode::OK {
let mut resp = HttpResponse::build(self.status_code);
resp.set(header::ContentType(self.content_type.clone()))
.if_true(self.flags.contains(Flags::CONTENT_DISPOSITION), |res| {
res.header(
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
);
});
if let Some(current_encoding) = self.encoding {
resp.encoding(current_encoding);
}
let reader = ChunkedReadFile {
size: self.md.len(),
offset: 0,
file: Some(self.file),
fut: None,
counter: 0,
};
return Ok(resp.streaming(reader));
}
let etag = if self.flags.contains(Flags::ETAG) {
self.etag()
} else {
None
};
let last_modified = if self.flags.contains(Flags::LAST_MD) {
self.last_modified()
} else {
None
};
// check preconditions
let precondition_failed = if !any_match(etag.as_ref(), req) {
true
} else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) =
(last_modified, req.get_header())
{
let t1: SystemTime = m.clone().into();
let t2: SystemTime = since.clone().into();
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
(Ok(t1), Ok(t2)) => t1 > t2,
_ => false,
}
} else {
false
};
// check last modified
let not_modified = if !none_match(etag.as_ref(), req) {
true
} else if req.headers().contains_key(&header::IF_NONE_MATCH) {
false
} else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) =
(last_modified, req.get_header())
{
let t1: SystemTime = m.clone().into();
let t2: SystemTime = since.clone().into();
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
(Ok(t1), Ok(t2)) => t1 <= t2,
_ => false,
}
} else {
false
};
let mut resp = HttpResponse::build(self.status_code);
resp.set(header::ContentType(self.content_type.clone()))
.if_true(self.flags.contains(Flags::CONTENT_DISPOSITION), |res| {
res.header(
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
);
});
// apply the configured content encoding, if any
if let Some(current_encoding) = self.encoding {
resp.encoding(current_encoding);
}
resp.if_some(last_modified, |lm, resp| {
resp.set(header::LastModified(lm));
})
.if_some(etag, |etag, resp| {
resp.set(header::ETag(etag));
});
resp.header(header::ACCEPT_RANGES, "bytes");
let mut length = self.md.len();
let mut offset = 0;
// check for range header
if let Some(ranges) = req.headers().get(&header::RANGE) {
if let Ok(rangesheader) = ranges.to_str() {
if let Ok(rangesvec) = HttpRange::parse(rangesheader, length) {
length = rangesvec[0].length;
offset = rangesvec[0].start;
resp.encoding(ContentEncoding::Identity);
resp.header(
header::CONTENT_RANGE,
format!(
"bytes {}-{}/{}",
offset,
offset + length - 1,
self.md.len()
),
);
} else {
resp.header(header::CONTENT_RANGE, format!("bytes */{}", length));
return Ok(resp.status(StatusCode::RANGE_NOT_SATISFIABLE).finish());
};
} else {
return Ok(resp.status(StatusCode::BAD_REQUEST).finish());
};
};
if precondition_failed {
return Ok(resp.status(StatusCode::PRECONDITION_FAILED).finish());
} else if not_modified {
return Ok(resp.status(StatusCode::NOT_MODIFIED).finish());
}
let reader = ChunkedReadFile {
offset,
size: length,
file: Some(self.file),
fut: None,
counter: 0,
};
if offset != 0 || length != self.md.len() {
Ok(resp.status(StatusCode::PARTIAL_CONTENT).streaming(reader))
} else {
Ok(resp.body(SizedStream::new(length, reader)))
}
}
}
impl Deref for NamedFile {
type Target = File;
fn deref(&self) -> &File {
&self.file
}
}
impl DerefMut for NamedFile {
fn deref_mut(&mut self) -> &mut File {
&mut self.file
}
}
/// Returns true if `req` has no `If-Match` header or one which matches `etag`.
fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
match req.get_header::<header::IfMatch>() {
None | Some(header::IfMatch::Any) => true,
Some(header::IfMatch::Items(ref items)) => {
if let Some(some_etag) = etag {
for item in items {
if item.strong_eq(some_etag) {
return true;
}
}
}
false
}
}
}
/// Returns true if `req` doesn't have an `If-None-Match` header matching `etag`.
fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
match req.get_header::<header::IfNoneMatch>() {
Some(header::IfNoneMatch::Any) => false,
Some(header::IfNoneMatch::Items(ref items)) => {
if let Some(some_etag) = etag {
for item in items {
if item.weak_eq(some_etag) {
return false;
}
}
}
true
}
None => true,
}
}
impl Responder for NamedFile {
type Error = Error;
type Future = Ready<Result<HttpResponse, Error>>;
fn respond_to(self, req: &HttpRequest) -> Self::Future {
ready(self.into_response(req))
}
}
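// A minimal usage sketch (illustrative, not part of the original file): since
// `NamedFile` implements `Responder`, it can be returned directly from an
// actix-web handler. The handler name `index` and the "static/index.html"
// path are assumptions.
use actix_files::NamedFile;
use actix_web::{Error, HttpRequest};

async fn index(_req: HttpRequest) -> Result<NamedFile, Error> {
    // `?` converts the `io::Error` from `open` into an actix-web `Error`.
    Ok(NamedFile::open("static/index.html")?.disable_content_disposition())
}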

actix-files/src/range.rs Normal file

@ -0,0 +1,375 @@
/// HTTP Range header representation.
#[derive(Debug, Clone, Copy)]
pub struct HttpRange {
pub start: u64,
pub length: u64,
}
static PREFIX: &str = "bytes=";
const PREFIX_LEN: usize = 6;
impl HttpRange {
/// Parses a Range HTTP header string as per RFC 2616.
///
/// `header` is the HTTP Range header (e.g. `bytes=0-9`).
/// `size` is the full size of the response (file).
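///
/// A minimal sketch of the expected behaviour (illustrative, not from the
/// original docs):
///
/// ```rust,ignore
/// // A single explicit range over a 100-byte resource.
/// let ranges = HttpRange::parse("bytes=0-9", 100).unwrap();
/// assert_eq!((ranges[0].start, ranges[0].length), (0, 10));
///
/// // A suffix range: the last 20 bytes.
/// let ranges = HttpRange::parse("bytes=-20", 100).unwrap();
/// assert_eq!((ranges[0].start, ranges[0].length), (80, 20));
/// ```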
pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ()> {
if header.is_empty() {
return Ok(Vec::new());
}
if !header.starts_with(PREFIX) {
return Err(());
}
let size_sig = size as i64;
let mut no_overlap = false;
let all_ranges: Vec<Option<HttpRange>> = header[PREFIX_LEN..]
.split(',')
.map(|x| x.trim())
.filter(|x| !x.is_empty())
.map(|ra| {
let mut start_end_iter = ra.split('-');
let start_str = start_end_iter.next().ok_or(())?.trim();
let end_str = start_end_iter.next().ok_or(())?.trim();
if start_str.is_empty() {
// If no start is specified, end specifies the
// range start relative to the end of the file.
let mut length: i64 = end_str.parse().map_err(|_| ())?;
if length > size_sig {
length = size_sig;
}
Ok(Some(HttpRange {
start: (size_sig - length) as u64,
length: length as u64,
}))
} else {
let start: i64 = start_str.parse().map_err(|_| ())?;
if start < 0 {
return Err(());
}
if start >= size_sig {
no_overlap = true;
return Ok(None);
}
let length = if end_str.is_empty() {
// If no end is specified, range extends to end of the file.
size_sig - start
} else {
let mut end: i64 = end_str.parse().map_err(|_| ())?;
if start > end {
return Err(());
}
if end >= size_sig {
end = size_sig - 1;
}
end - start + 1
};
Ok(Some(HttpRange {
start: start as u64,
length: length as u64,
}))
}
})
.collect::<Result<_, _>>()?;
let ranges: Vec<HttpRange> = all_ranges.into_iter().filter_map(|x| x).collect();
if no_overlap && ranges.is_empty() {
return Err(());
}
Ok(ranges)
}
}
#[cfg(test)]
mod tests {
use super::*;
struct T(&'static str, u64, Vec<HttpRange>);
#[test]
fn test_parse() {
let tests = vec![
T("", 0, vec![]),
T("", 1000, vec![]),
T("foo", 0, vec![]),
T("bytes=", 0, vec![]),
T("bytes=7", 10, vec![]),
T("bytes= 7 ", 10, vec![]),
T("bytes=1-", 0, vec![]),
T("bytes=5-4", 10, vec![]),
T("bytes=0-2,5-4", 10, vec![]),
T("bytes=2-5,4-3", 10, vec![]),
T("bytes=--5,4--3", 10, vec![]),
T("bytes=A-", 10, vec![]),
T("bytes=A- ", 10, vec![]),
T("bytes=A-Z", 10, vec![]),
T("bytes= -Z", 10, vec![]),
T("bytes=5-Z", 10, vec![]),
T("bytes=Ran-dom, garbage", 10, vec![]),
T("bytes=0x01-0x02", 10, vec![]),
T("bytes= ", 10, vec![]),
T("bytes= , , , ", 10, vec![]),
T(
"bytes=0-9",
10,
vec![HttpRange {
start: 0,
length: 10,
}],
),
T(
"bytes=0-",
10,
vec![HttpRange {
start: 0,
length: 10,
}],
),
T(
"bytes=5-",
10,
vec![HttpRange {
start: 5,
length: 5,
}],
),
T(
"bytes=0-20",
10,
vec![HttpRange {
start: 0,
length: 10,
}],
),
T(
"bytes=15-,0-5",
10,
vec![HttpRange {
start: 0,
length: 6,
}],
),
T(
"bytes=1-2,5-",
10,
vec![
HttpRange {
start: 1,
length: 2,
},
HttpRange {
start: 5,
length: 5,
},
],
),
T(
"bytes=-2 , 7-",
11,
vec![
HttpRange {
start: 9,
length: 2,
},
HttpRange {
start: 7,
length: 4,
},
],
),
T(
"bytes=0-0 ,2-2, 7-",
11,
vec![
HttpRange {
start: 0,
length: 1,
},
HttpRange {
start: 2,
length: 1,
},
HttpRange {
start: 7,
length: 4,
},
],
),
T(
"bytes=-5",
10,
vec![HttpRange {
start: 5,
length: 5,
}],
),
T(
"bytes=-15",
10,
vec![HttpRange {
start: 0,
length: 10,
}],
),
T(
"bytes=0-499",
10000,
vec![HttpRange {
start: 0,
length: 500,
}],
),
T(
"bytes=500-999",
10000,
vec![HttpRange {
start: 500,
length: 500,
}],
),
T(
"bytes=-500",
10000,
vec![HttpRange {
start: 9500,
length: 500,
}],
),
T(
"bytes=9500-",
10000,
vec![HttpRange {
start: 9500,
length: 500,
}],
),
T(
"bytes=0-0,-1",
10000,
vec![
HttpRange {
start: 0,
length: 1,
},
HttpRange {
start: 9999,
length: 1,
},
],
),
T(
"bytes=500-600,601-999",
10000,
vec![
HttpRange {
start: 500,
length: 101,
},
HttpRange {
start: 601,
length: 399,
},
],
),
T(
"bytes=500-700,601-999",
10000,
vec![
HttpRange {
start: 500,
length: 201,
},
HttpRange {
start: 601,
length: 399,
},
],
),
// Match Apache laxity:
T(
"bytes= 1 -2 , 4- 5, 7 - 8 , ,,",
11,
vec![
HttpRange {
start: 1,
length: 2,
},
HttpRange {
start: 4,
length: 2,
},
HttpRange {
start: 7,
length: 2,
},
],
),
];
for t in tests {
let header = t.0;
let size = t.1;
let expected = t.2;
let res = HttpRange::parse(header, size);
if res.is_err() {
if expected.is_empty() {
continue;
} else {
assert!(
false,
"parse({}, {}) returned error {:?}",
header,
size,
res.unwrap_err()
);
}
}
let got = res.unwrap();
if got.len() != expected.len() {
assert!(
false,
"len(parseRange({}, {})) = {}, want {}",
header,
size,
got.len(),
expected.len()
);
continue;
}
for i in 0..expected.len() {
if got[i].start != expected[i].start {
assert!(
false,
"parseRange({}, {})[{}].start = {}, want {}",
header, size, i, got[i].start, expected[i].start
)
}
if got[i].length != expected[i].length {
assert!(
false,
"parseRange({}, {})[{}].length = {}, want {}",
header, size, i, got[i].length, expected[i].length
)
}
}
}
}
}

actix-files/tests/test.png Normal file

Binary file not shown.

actix-framed/Cargo.toml Normal file

@ -0,0 +1,39 @@
[package]
name = "actix-framed"
version = "0.3.0-alpha.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix framed app server"
readme = "README.md"
keywords = ["http", "web", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-framed/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
license = "MIT/Apache-2.0"
edition = "2018"
workspace = ".."
[lib]
name = "actix_framed"
path = "src/lib.rs"
[dependencies]
actix-codec = "0.2.0-alpha.1"
actix-service = "1.0.0-alpha.1"
actix-router = "0.1.2"
actix-rt = "1.0.0-alpha.1"
actix-http = "0.3.0-alpha.1"
actix-server-config = "0.3.0-alpha.1"
bytes = "0.4"
futures = "0.3.1"
pin-project = "0.4.6"
log = "0.4"
[dev-dependencies]
actix-server = { version = "0.8.0-alpha.1", features=["openssl"] }
actix-connect = { version = "0.3.0-alpha.1", features=["openssl"] }
actix-http-test = { version = "0.3.0-alpha.1", features=["openssl"] }
actix-utils = "0.5.0-alpha.1"

actix-framed/LICENSE-APACHE Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-NOW Nikolay Kim
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

actix-framed/LICENSE-MIT Normal file

@ -0,0 +1,25 @@
Copyright (c) 2017 Nikolay Kim
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

actix-framed/README.md Normal file

@ -0,0 +1,8 @@
# Framed app for actix web [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-framed)](https://crates.io/crates/actix-framed) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
## Documentation & community resources
* [API Documentation](https://docs.rs/actix-framed/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-framed](https://crates.io/crates/actix-framed)
* Minimum supported Rust version: 1.33 or later
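## Example

A minimal sketch (illustrative, not taken from the crate docs): the handler below does nothing, while a real one would typically perform a WebSocket handshake and drive the framed connection (see the crate's tests):

```rust
use actix_codec::{AsyncRead, AsyncWrite};
use actix_framed::{FramedApp, FramedRequest, FramedRoute};
use actix_http::Error;

// A do-nothing handler; it receives the request together with the framed I/O.
async fn handler<T: AsyncRead + AsyncWrite>(_req: FramedRequest<T>) -> Result<(), Error> {
    Ok(())
}

// Register the handler for GET /index.html.
fn app<T: AsyncRead + AsyncWrite + Unpin + 'static>() -> FramedApp<T> {
    FramedApp::new().service(FramedRoute::get("/index.html").to(handler))
}
```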

actix-framed/changes.md Normal file

@ -0,0 +1,20 @@
# Changes
## [0.2.1] - 2019-07-20
* Remove unneeded actix-utils dependency
## [0.2.0] - 2019-05-12
* Update dependencies
## [0.1.0] - 2019-04-16
* Update tests
## [0.1.0-alpha.1] - 2019-04-12
* Initial release

actix-framed/src/app.rs Normal file

@ -0,0 +1,222 @@
use std::future::Future;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_http::h1::{Codec, SendResponse};
use actix_http::{Error, Request, Response};
use actix_router::{Path, Router, Url};
use actix_server_config::ServerConfig;
use actix_service::{IntoServiceFactory, Service, ServiceFactory};
use futures::future::{ok, FutureExt, LocalBoxFuture};
use crate::helpers::{BoxedHttpNewService, BoxedHttpService, HttpNewService};
use crate::request::FramedRequest;
use crate::state::State;
type BoxedResponse = LocalBoxFuture<'static, Result<(), Error>>;
pub trait HttpServiceFactory {
type Factory: ServiceFactory;
fn path(&self) -> &str;
fn create(self) -> Self::Factory;
}
/// Application builder
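///
/// A minimal sketch (illustrative; `MyState` and `my_handler` are assumed to
/// exist, with `my_handler` being an async fn that takes a `FramedRequest`
/// and returns `Result<(), Error>`):
///
/// ```rust,ignore
/// let app = FramedApp::with(MyState::default())
///     .service(FramedRoute::get("/index.html").to(my_handler));
/// ```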
pub struct FramedApp<T, S = ()> {
state: State<S>,
services: Vec<(String, BoxedHttpNewService<FramedRequest<T, S>>)>,
}
impl<T: 'static> FramedApp<T, ()> {
pub fn new() -> Self {
FramedApp {
state: State::new(()),
services: Vec::new(),
}
}
}
impl<T: 'static, S: 'static> FramedApp<T, S> {
pub fn with(state: S) -> FramedApp<T, S> {
FramedApp {
services: Vec::new(),
state: State::new(state),
}
}
pub fn service<U>(mut self, factory: U) -> Self
where
U: HttpServiceFactory,
U::Factory: ServiceFactory<
Config = (),
Request = FramedRequest<T, S>,
Response = (),
Error = Error,
InitError = (),
> + 'static,
<U::Factory as ServiceFactory>::Future: 'static,
<U::Factory as ServiceFactory>::Service: Service<
Request = FramedRequest<T, S>,
Response = (),
Error = Error,
Future = LocalBoxFuture<'static, Result<(), Error>>,
>,
{
let path = factory.path().to_string();
self.services
.push((path, Box::new(HttpNewService::new(factory.create()))));
self
}
}
impl<T, S> IntoServiceFactory<FramedAppFactory<T, S>> for FramedApp<T, S>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
S: 'static,
{
fn into_factory(self) -> FramedAppFactory<T, S> {
FramedAppFactory {
state: self.state,
services: Rc::new(self.services),
}
}
}
#[derive(Clone)]
pub struct FramedAppFactory<T, S> {
state: State<S>,
services: Rc<Vec<(String, BoxedHttpNewService<FramedRequest<T, S>>)>>,
}
impl<T, S> ServiceFactory for FramedAppFactory<T, S>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
S: 'static,
{
type Config = ServerConfig;
type Request = (Request, Framed<T, Codec>);
type Response = ();
type Error = Error;
type InitError = ();
type Service = FramedAppService<T, S>;
type Future = CreateService<T, S>;
fn new_service(&self, _: &ServerConfig) -> Self::Future {
CreateService {
fut: self
.services
.iter()
.map(|(path, service)| {
CreateServiceItem::Future(
Some(path.clone()),
service.new_service(&()),
)
})
.collect(),
state: self.state.clone(),
}
}
}
#[doc(hidden)]
pub struct CreateService<T, S> {
fut: Vec<CreateServiceItem<T, S>>,
state: State<S>,
}
enum CreateServiceItem<T, S> {
Future(
Option<String>,
LocalBoxFuture<'static, Result<BoxedHttpService<FramedRequest<T, S>>, ()>>,
),
Service(String, BoxedHttpService<FramedRequest<T, S>>),
}
impl<S: 'static, T: 'static> Future for CreateService<T, S>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Output = Result<FramedAppService<T, S>, ()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let mut done = true;
// poll http services
for item in &mut self.fut {
let res = match item {
CreateServiceItem::Future(ref mut path, ref mut fut) => {
match Pin::new(fut).poll(cx) {
Poll::Ready(Ok(service)) => {
Some((path.take().unwrap(), service))
}
Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
Poll::Pending => {
done = false;
None
}
}
}
CreateServiceItem::Service(_, _) => continue,
};
if let Some((path, service)) = res {
*item = CreateServiceItem::Service(path, service);
}
}
if done {
let router = self
.fut
.drain(..)
.fold(Router::build(), |mut router, item| {
match item {
CreateServiceItem::Service(path, service) => {
router.path(&path, service);
}
CreateServiceItem::Future(_, _) => unreachable!(),
}
router
});
Poll::Ready(Ok(FramedAppService {
router: router.finish(),
state: self.state.clone(),
}))
} else {
Poll::Pending
}
}
}
pub struct FramedAppService<T, S> {
state: State<S>,
router: Router<BoxedHttpService<FramedRequest<T, S>>>,
}
impl<S: 'static, T: 'static> Service for FramedAppService<T, S>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Request = (Request, Framed<T, Codec>);
type Response = ();
type Error = Error;
type Future = BoxedResponse;
fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, (req, framed): (Request, Framed<T, Codec>)) -> Self::Future {
let mut path = Path::new(Url::new(req.uri().clone()));
if let Some((srv, _info)) = self.router.recognize_mut(&mut path) {
return srv.call(FramedRequest::new(req, framed, path, self.state.clone()));
}
SendResponse::new(framed, Response::NotFound().finish())
.then(|_| ok(()))
.boxed_local()
}
}

actix-framed/src/helpers.rs Normal file

@ -0,0 +1,98 @@
use std::task::{Context, Poll};
use actix_http::Error;
use actix_service::{Service, ServiceFactory};
use futures::future::{FutureExt, LocalBoxFuture};
pub(crate) type BoxedHttpService<Req> = Box<
dyn Service<
Request = Req,
Response = (),
Error = Error,
Future = LocalBoxFuture<'static, Result<(), Error>>,
>,
>;
pub(crate) type BoxedHttpNewService<Req> = Box<
dyn ServiceFactory<
Config = (),
Request = Req,
Response = (),
Error = Error,
InitError = (),
Service = BoxedHttpService<Req>,
Future = LocalBoxFuture<'static, Result<BoxedHttpService<Req>, ()>>,
>,
>;
pub(crate) struct HttpNewService<T: ServiceFactory>(T);
impl<T> HttpNewService<T>
where
T: ServiceFactory<Response = (), Error = Error>,
T::Response: 'static,
T::Future: 'static,
T::Service: Service<Future = LocalBoxFuture<'static, Result<(), Error>>> + 'static,
<T::Service as Service>::Future: 'static,
{
pub fn new(service: T) -> Self {
HttpNewService(service)
}
}
impl<T> ServiceFactory for HttpNewService<T>
where
T: ServiceFactory<Config = (), Response = (), Error = Error>,
T::Request: 'static,
T::Future: 'static,
T::Service: Service<Future = LocalBoxFuture<'static, Result<(), Error>>> + 'static,
<T::Service as Service>::Future: 'static,
{
type Config = ();
type Request = T::Request;
type Response = ();
type Error = Error;
type InitError = ();
type Service = BoxedHttpService<T::Request>;
type Future = LocalBoxFuture<'static, Result<Self::Service, ()>>;
fn new_service(&self, _: &()) -> Self::Future {
let fut = self.0.new_service(&());
async move {
fut.await.map_err(|_| ()).map(|service| {
let service: BoxedHttpService<_> =
Box::new(HttpServiceWrapper { service });
service
})
}
.boxed_local()
}
}
struct HttpServiceWrapper<T: Service> {
service: T,
}
impl<T> Service for HttpServiceWrapper<T>
where
T: Service<
Response = (),
Future = LocalBoxFuture<'static, Result<(), Error>>,
Error = Error,
>,
T::Request: 'static,
{
type Request = T::Request;
type Response = ();
type Error = Error;
type Future = LocalBoxFuture<'static, Result<(), Error>>;
fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(cx)
}
fn call(&mut self, req: Self::Request) -> Self::Future {
self.service.call(req)
}
}

actix-framed/src/lib.rs Normal file

@ -0,0 +1,17 @@
#![allow(clippy::type_complexity, clippy::new_without_default, dead_code)]
mod app;
mod helpers;
mod request;
mod route;
mod service;
mod state;
pub mod test;
// re-export for convenience
pub use actix_http::{http, Error, HttpMessage, Response, ResponseError};
pub use self::app::{FramedApp, FramedAppService};
pub use self::request::FramedRequest;
pub use self::route::FramedRoute;
pub use self::service::{SendError, VerifyWebSockets};
pub use self::state::State;

actix-framed/src/request.rs Normal file

@ -0,0 +1,170 @@
use std::cell::{Ref, RefMut};
use actix_codec::Framed;
use actix_http::http::{HeaderMap, Method, Uri, Version};
use actix_http::{h1::Codec, Extensions, Request, RequestHead};
use actix_router::{Path, Url};
use crate::state::State;
pub struct FramedRequest<Io, S = ()> {
req: Request,
framed: Framed<Io, Codec>,
state: State<S>,
pub(crate) path: Path<Url>,
}
impl<Io, S> FramedRequest<Io, S> {
pub fn new(
req: Request,
framed: Framed<Io, Codec>,
path: Path<Url>,
state: State<S>,
) -> Self {
Self {
req,
framed,
state,
path,
}
}
}
impl<Io, S> FramedRequest<Io, S> {
/// Split the request into its parts
pub fn into_parts(self) -> (Request, Framed<Io, Codec>, State<S>) {
(self.req, self.framed, self.state)
}
/// This method returns a reference to the request head
#[inline]
pub fn head(&self) -> &RequestHead {
self.req.head()
}
/// This method returns a mutable reference to the request head.
/// Panics if multiple references to the http request exist.
#[inline]
pub fn head_mut(&mut self) -> &mut RequestHead {
self.req.head_mut()
}
/// Shared application state
#[inline]
pub fn state(&self) -> &S {
self.state.get_ref()
}
/// Request's uri.
#[inline]
pub fn uri(&self) -> &Uri {
&self.head().uri
}
/// Read the Request method.
#[inline]
pub fn method(&self) -> &Method {
&self.head().method
}
/// Read the Request Version.
#[inline]
pub fn version(&self) -> Version {
self.head().version
}
#[inline]
/// Returns request's headers.
pub fn headers(&self) -> &HeaderMap {
&self.head().headers
}
/// The target path of this Request.
#[inline]
pub fn path(&self) -> &str {
self.head().uri.path()
}
/// The query string in the URL.
///
/// E.g., id=10
#[inline]
pub fn query_string(&self) -> &str {
if let Some(query) = self.uri().query().as_ref() {
query
} else {
""
}
}
/// Get a reference to the Path parameters.
///
/// Params is a container for url parameters.
/// A variable segment is specified in the form `{identifier}`,
/// where the identifier can be used later in a request handler to
/// access the matched value for that segment.
#[inline]
pub fn match_info(&self) -> &Path<Url> {
&self.path
}
/// Request extensions
#[inline]
pub fn extensions(&self) -> Ref<Extensions> {
self.head().extensions()
}
/// Mutable reference to the request's extensions
#[inline]
pub fn extensions_mut(&self) -> RefMut<Extensions> {
self.head().extensions_mut()
}
}
#[cfg(test)]
mod tests {
use actix_http::http::{HeaderName, HeaderValue, HttpTryFrom};
use actix_http::test::{TestBuffer, TestRequest};
use super::*;
#[test]
fn test_request() {
let buf = TestBuffer::empty();
let framed = Framed::new(buf, Codec::default());
let req = TestRequest::with_uri("/index.html?q=1")
.header("content-type", "test")
.finish();
let path = Path::new(Url::new(req.uri().clone()));
let mut freq = FramedRequest::new(req, framed, path, State::new(10u8));
assert_eq!(*freq.state(), 10);
assert_eq!(freq.version(), Version::HTTP_11);
assert_eq!(freq.method(), Method::GET);
assert_eq!(freq.path(), "/index.html");
assert_eq!(freq.query_string(), "q=1");
assert_eq!(
freq.headers()
.get("content-type")
.unwrap()
.to_str()
.unwrap(),
"test"
);
freq.head_mut().headers.insert(
HeaderName::try_from("x-hdr").unwrap(),
HeaderValue::from_static("test"),
);
assert_eq!(
freq.headers().get("x-hdr").unwrap().to_str().unwrap(),
"test"
);
freq.extensions_mut().insert(100usize);
assert_eq!(*freq.extensions().get::<usize>().unwrap(), 100usize);
let (_, _, state) = freq.into_parts();
assert_eq!(*state, 10);
}
}

actix-framed/src/route.rs Normal file

@ -0,0 +1,159 @@
use std::fmt;
use std::future::Future;
use std::marker::PhantomData;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_http::{http::Method, Error};
use actix_service::{Service, ServiceFactory};
use futures::future::{ok, FutureExt, LocalBoxFuture, Ready};
use log::error;
use crate::app::HttpServiceFactory;
use crate::request::FramedRequest;
/// Resource route definition
///
/// Route uses builder-like pattern for configuration.
/// If handler is not explicitly set, default *404 Not Found* handler is used.
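///
/// A minimal sketch (illustrative; `my_handler` is an assumed async handler
/// returning `Result<(), Error>`):
///
/// ```rust,ignore
/// // Explicit method registration...
/// let route = FramedRoute::new("/index.html")
///     .method(Method::GET)
///     .to(my_handler);
///
/// // ...or one of the shorthand constructors.
/// let route = FramedRoute::get("/index.html").to(my_handler);
/// ```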
pub struct FramedRoute<Io, S, F = (), R = (), E = ()> {
handler: F,
pattern: String,
methods: Vec<Method>,
state: PhantomData<(Io, S, R, E)>,
}
impl<Io, S> FramedRoute<Io, S> {
pub fn new(pattern: &str) -> Self {
FramedRoute {
handler: (),
pattern: pattern.to_string(),
methods: Vec::new(),
state: PhantomData,
}
}
pub fn get(path: &str) -> FramedRoute<Io, S> {
FramedRoute::new(path).method(Method::GET)
}
pub fn post(path: &str) -> FramedRoute<Io, S> {
FramedRoute::new(path).method(Method::POST)
}
pub fn put(path: &str) -> FramedRoute<Io, S> {
FramedRoute::new(path).method(Method::PUT)
}
pub fn delete(path: &str) -> FramedRoute<Io, S> {
FramedRoute::new(path).method(Method::DELETE)
}
pub fn method(mut self, method: Method) -> Self {
self.methods.push(method);
self
}
pub fn to<F, R, E>(self, handler: F) -> FramedRoute<Io, S, F, R, E>
where
F: FnMut(FramedRequest<Io, S>) -> R,
R: Future<Output = Result<(), E>> + 'static,
E: fmt::Debug,
{
FramedRoute {
handler,
pattern: self.pattern,
methods: self.methods,
state: PhantomData,
}
}
}
impl<Io, S, F, R, E> HttpServiceFactory for FramedRoute<Io, S, F, R, E>
where
Io: AsyncRead + AsyncWrite + 'static,
F: FnMut(FramedRequest<Io, S>) -> R + Clone,
R: Future<Output = Result<(), E>> + 'static,
E: fmt::Display,
{
type Factory = FramedRouteFactory<Io, S, F, R, E>;
fn path(&self) -> &str {
&self.pattern
}
fn create(self) -> Self::Factory {
FramedRouteFactory {
handler: self.handler,
methods: self.methods,
_t: PhantomData,
}
}
}
pub struct FramedRouteFactory<Io, S, F, R, E> {
handler: F,
methods: Vec<Method>,
_t: PhantomData<(Io, S, R, E)>,
}
impl<Io, S, F, R, E> ServiceFactory for FramedRouteFactory<Io, S, F, R, E>
where
Io: AsyncRead + AsyncWrite + 'static,
F: FnMut(FramedRequest<Io, S>) -> R + Clone,
R: Future<Output = Result<(), E>> + 'static,
E: fmt::Display,
{
type Config = ();
type Request = FramedRequest<Io, S>;
type Response = ();
type Error = Error;
type InitError = ();
type Service = FramedRouteService<Io, S, F, R, E>;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: &()) -> Self::Future {
ok(FramedRouteService {
handler: self.handler.clone(),
methods: self.methods.clone(),
_t: PhantomData,
})
}
}
pub struct FramedRouteService<Io, S, F, R, E> {
handler: F,
methods: Vec<Method>,
_t: PhantomData<(Io, S, R, E)>,
}
impl<Io, S, F, R, E> Service for FramedRouteService<Io, S, F, R, E>
where
Io: AsyncRead + AsyncWrite + 'static,
F: FnMut(FramedRequest<Io, S>) -> R + Clone,
R: Future<Output = Result<(), E>> + 'static,
E: fmt::Display,
{
type Request = FramedRequest<Io, S>;
type Response = ();
type Error = Error;
type Future = LocalBoxFuture<'static, Result<(), Error>>;
fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: FramedRequest<Io, S>) -> Self::Future {
let fut = (self.handler)(req);
async move {
let res = fut.await;
if let Err(e) = res {
error!("Error in request handler: {}", e);
}
Ok(())
}
.boxed_local()
}
}

actix-framed/src/service.rs Normal file

@ -0,0 +1,156 @@
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_http::body::BodySize;
use actix_http::error::ResponseError;
use actix_http::h1::{Codec, Message};
use actix_http::ws::{verify_handshake, HandshakeError};
use actix_http::{Request, Response};
use actix_service::{Service, ServiceFactory};
use futures::future::{err, ok, Either, Ready};
use futures::Future;
/// Service that verifies that an incoming request is a valid WebSocket
/// upgrade request. In case of error it returns a `HandshakeError`.
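///
/// A minimal sketch (illustrative; `pipeline_factory` comes from
/// `actix-service`) of composing it with `SendError` so that failed
/// handshakes are turned into HTTP error responses, mirroring this crate's
/// tests:
///
/// ```rust,ignore
/// let factory = pipeline_factory(VerifyWebSockets::default())
///     .then(SendError::default());
/// ```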
pub struct VerifyWebSockets<T, C> {
_t: PhantomData<(T, C)>,
}
impl<T, C> Default for VerifyWebSockets<T, C> {
fn default() -> Self {
VerifyWebSockets { _t: PhantomData }
}
}
impl<T, C> ServiceFactory for VerifyWebSockets<T, C> {
type Config = C;
type Request = (Request, Framed<T, Codec>);
type Response = (Request, Framed<T, Codec>);
type Error = (HandshakeError, Framed<T, Codec>);
type InitError = ();
type Service = VerifyWebSockets<T, C>;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: &C) -> Self::Future {
ok(VerifyWebSockets { _t: PhantomData })
}
}
impl<T, C> Service for VerifyWebSockets<T, C> {
type Request = (Request, Framed<T, Codec>);
type Response = (Request, Framed<T, Codec>);
type Error = (HandshakeError, Framed<T, Codec>);
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, (req, framed): (Request, Framed<T, Codec>)) -> Self::Future {
match verify_handshake(req.head()) {
Err(e) => err((e, framed)),
Ok(_) => ok((req, framed)),
}
}
}
/// Send http/1 error response
pub struct SendError<T, R, E, C>(PhantomData<(T, R, E, C)>);
impl<T, R, E, C> Default for SendError<T, R, E, C>
where
T: AsyncRead + AsyncWrite,
E: ResponseError,
{
fn default() -> Self {
SendError(PhantomData)
}
}
impl<T, R, E, C> ServiceFactory for SendError<T, R, E, C>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
R: 'static,
E: ResponseError + 'static,
{
type Config = C;
type Request = Result<R, (E, Framed<T, Codec>)>;
type Response = R;
type Error = (E, Framed<T, Codec>);
type InitError = ();
type Service = SendError<T, R, E, C>;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: &C) -> Self::Future {
ok(SendError(PhantomData))
}
}
impl<T, R, E, C> Service for SendError<T, R, E, C>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
R: 'static,
E: ResponseError + 'static,
{
type Request = Result<R, (E, Framed<T, Codec>)>;
type Response = R;
type Error = (E, Framed<T, Codec>);
type Future = Either<Ready<Result<R, (E, Framed<T, Codec>)>>, SendErrorFut<T, R, E>>;
fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Result<R, (E, Framed<T, Codec>)>) -> Self::Future {
match req {
Ok(r) => Either::Left(ok(r)),
Err((e, framed)) => {
let res = e.error_response().drop_body();
Either::Right(SendErrorFut {
framed: Some(framed),
res: Some((res, BodySize::Empty).into()),
err: Some(e),
_t: PhantomData,
})
}
}
}
}
#[pin_project::pin_project]
pub struct SendErrorFut<T, R, E> {
res: Option<Message<(Response<()>, BodySize)>>,
framed: Option<Framed<T, Codec>>,
err: Option<E>,
_t: PhantomData<R>,
}
impl<T, R, E> Future for SendErrorFut<T, R, E>
where
E: ResponseError,
T: AsyncRead + AsyncWrite + Unpin,
{
type Output = Result<R, (E, Framed<T, Codec>)>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
if let Some(res) = self.res.take() {
if self.framed.as_mut().unwrap().write(res).is_err() {
return Poll::Ready(Err((
self.err.take().unwrap(),
self.framed.take().unwrap(),
)));
}
}
match self.framed.as_mut().unwrap().flush(cx) {
Poll::Ready(Ok(_)) => {
Poll::Ready(Err((self.err.take().unwrap(), self.framed.take().unwrap())))
}
Poll::Ready(Err(_)) => {
Poll::Ready(Err((self.err.take().unwrap(), self.framed.take().unwrap())))
}
Poll::Pending => Poll::Pending,
}
}
}

actix-framed/src/state.rs Normal file

@ -0,0 +1,29 @@
use std::ops::Deref;
use std::sync::Arc;
/// Application state
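///
/// A minimal sketch (illustrative): `State` is a cheaply clonable `Arc`
/// wrapper around shared application data.
///
/// ```rust,ignore
/// let state = State::new(10u8);
/// let copy = state.clone();        // clones the `Arc`, not the value
/// assert_eq!(*copy.get_ref(), 10);
/// assert_eq!(*state, 10);          // `Deref` gives direct access
/// ```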
pub struct State<S>(Arc<S>);
impl<S> State<S> {
pub fn new(state: S) -> State<S> {
State(Arc::new(state))
}
pub fn get_ref(&self) -> &S {
self.0.as_ref()
}
}
impl<S> Deref for State<S> {
type Target = S;
fn deref(&self) -> &S {
self.0.as_ref()
}
}
impl<S> Clone for State<S> {
fn clone(&self) -> State<S> {
State(self.0.clone())
}
}

actix-framed/src/test.rs Normal file

@ -0,0 +1,152 @@
//! Various helpers for Actix applications to use during testing.
use std::future::Future;
use actix_codec::Framed;
use actix_http::h1::Codec;
use actix_http::http::header::{Header, HeaderName, IntoHeaderValue};
use actix_http::http::{HttpTryFrom, Method, Uri, Version};
use actix_http::test::{TestBuffer, TestRequest as HttpTestRequest};
use actix_router::{Path, Url};
use crate::{FramedRequest, State};
/// Test `FramedRequest` builder.
pub struct TestRequest<S = ()> {
req: HttpTestRequest,
path: Path<Url>,
state: State<S>,
}
impl Default for TestRequest<()> {
fn default() -> TestRequest {
TestRequest {
req: HttpTestRequest::default(),
path: Path::new(Url::new(Uri::default())),
state: State::new(()),
}
}
}
impl TestRequest<()> {
/// Create TestRequest and set request uri
pub fn with_uri(path: &str) -> Self {
Self::get().uri(path)
}
/// Create TestRequest and set header
pub fn with_hdr<H: Header>(hdr: H) -> Self {
Self::default().set(hdr)
}
/// Create TestRequest and set header
pub fn with_header<K, V>(key: K, value: V) -> Self
where
HeaderName: HttpTryFrom<K>,
V: IntoHeaderValue,
{
Self::default().header(key, value)
}
/// Create TestRequest and set method to `Method::GET`
pub fn get() -> Self {
Self::default().method(Method::GET)
}
/// Create TestRequest and set method to `Method::POST`
pub fn post() -> Self {
Self::default().method(Method::POST)
}
}
impl<S> TestRequest<S> {
/// Create a TestRequest with the given application state
pub fn with_state(state: S) -> TestRequest<S> {
let req = TestRequest::get();
TestRequest {
state: State::new(state),
req: req.req,
path: req.path,
}
}
/// Set HTTP version of this request
pub fn version(mut self, ver: Version) -> Self {
self.req.version(ver);
self
}
/// Set HTTP method of this request
pub fn method(mut self, meth: Method) -> Self {
self.req.method(meth);
self
}
/// Set HTTP Uri of this request
pub fn uri(mut self, path: &str) -> Self {
self.req.uri(path);
self
}
/// Set a header
pub fn set<H: Header>(mut self, hdr: H) -> Self {
self.req.set(hdr);
self
}
/// Set a header
pub fn header<K, V>(mut self, key: K, value: V) -> Self
where
HeaderName: HttpTryFrom<K>,
V: IntoHeaderValue,
{
self.req.header(key, value);
self
}
/// Set request path pattern parameter
pub fn param(mut self, name: &'static str, value: &'static str) -> Self {
self.path.add_static(name, value);
self
}
/// Complete request creation and generate a `FramedRequest` instance
pub fn finish(mut self) -> FramedRequest<TestBuffer, S> {
let req = self.req.finish();
self.path.get_mut().update(req.uri());
let framed = Framed::new(TestBuffer::empty(), Codec::default());
FramedRequest::new(req, framed, self.path, self.state)
}
/// This method generates a `FramedRequest` instance and executes the async handler
pub async fn run<F, R, I, E>(self, f: F) -> Result<I, E>
where
F: FnOnce(FramedRequest<TestBuffer, S>) -> R,
R: Future<Output = Result<I, E>>,
{
f(self.finish()).await
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test() {
let req = TestRequest::with_uri("/index.html")
.header("x-test", "test")
.param("test", "123")
.finish();
assert_eq!(*req.state(), ());
assert_eq!(req.version(), Version::HTTP_11);
assert_eq!(req.method(), Method::GET);
assert_eq!(req.path(), "/index.html");
assert_eq!(req.query_string(), "");
assert_eq!(
req.headers().get("x-test").unwrap().to_str().unwrap(),
"test"
);
assert_eq!(&req.match_info()["test"], "123");
}
}


@ -0,0 +1,158 @@
use actix_codec::{AsyncRead, AsyncWrite};
use actix_http::{body, http::StatusCode, ws, Error, HttpService, Response};
use actix_http_test::TestServer;
use actix_service::{pipeline_factory, IntoServiceFactory, ServiceFactory};
use actix_utils::framed::FramedTransport;
use bytes::{Bytes, BytesMut};
use futures::{future, SinkExt, StreamExt};
use actix_framed::{FramedApp, FramedRequest, FramedRoute, SendError, VerifyWebSockets};
async fn ws_service<T: AsyncRead + AsyncWrite>(
req: FramedRequest<T>,
) -> Result<(), Error> {
let (req, mut framed, _) = req.into_parts();
let res = ws::handshake(req.head()).unwrap().message_body(());
framed
.send((res, body::BodySize::None).into())
.await
.unwrap();
FramedTransport::new(framed.into_framed(ws::Codec::new()), service)
.await
.unwrap();
Ok(())
}
async fn service(msg: ws::Frame) -> Result<ws::Message, Error> {
let msg = match msg {
ws::Frame::Ping(msg) => ws::Message::Pong(msg),
ws::Frame::Text(text) => {
ws::Message::Text(String::from_utf8_lossy(&text.unwrap()).to_string())
}
ws::Frame::Binary(bin) => ws::Message::Binary(bin.unwrap().freeze()),
ws::Frame::Close(reason) => ws::Message::Close(reason),
_ => panic!(),
};
Ok(msg)
}
#[actix_rt::test]
async fn test_simple() {
let mut srv = TestServer::start(|| {
HttpService::build()
.upgrade(
FramedApp::new().service(FramedRoute::get("/index.html").to(ws_service)),
)
.finish(|_| future::ok::<_, Error>(Response::NotFound()))
});
assert!(srv.ws_at("/test").await.is_err());
// client service
let mut framed = srv.ws_at("/index.html").await.unwrap();
framed
.send(ws::Message::Text("text".to_string()))
.await
.unwrap();
let (item, mut framed) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Text(Some(BytesMut::from("text")))
);
framed
.send(ws::Message::Binary("text".into()))
.await
.unwrap();
let (item, mut framed) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Binary(Some(Bytes::from_static(b"text").into()))
);
framed.send(ws::Message::Ping("text".into())).await.unwrap();
let (item, mut framed) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Pong("text".to_string().into())
);
framed
.send(ws::Message::Close(Some(ws::CloseCode::Normal.into())))
.await
.unwrap();
let (item, _) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Close(Some(ws::CloseCode::Normal.into()))
);
}
#[actix_rt::test]
async fn test_service() {
let mut srv = TestServer::start(|| {
pipeline_factory(actix_http::h1::OneRequest::new().map_err(|_| ())).and_then(
pipeline_factory(
pipeline_factory(VerifyWebSockets::default())
.then(SendError::default())
.map_err(|_| ()),
)
.and_then(
FramedApp::new()
.service(FramedRoute::get("/index.html").to(ws_service))
.into_factory()
.map_err(|_| ()),
),
)
});
// non ws request
let res = srv.get("/index.html").send().await.unwrap();
assert_eq!(res.status(), StatusCode::BAD_REQUEST);
// not found
assert!(srv.ws_at("/test").await.is_err());
// client service
let mut framed = srv.ws_at("/index.html").await.unwrap();
framed
.send(ws::Message::Text("text".to_string()))
.await
.unwrap();
let (item, mut framed) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Text(Some(BytesMut::from("text")))
);
framed
.send(ws::Message::Binary("text".into()))
.await
.unwrap();
let (item, mut framed) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Binary(Some(Bytes::from_static(b"text").into()))
);
framed.send(ws::Message::Ping("text".into())).await.unwrap();
let (item, mut framed) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Pong("text".to_string().into())
);
framed
.send(ws::Message::Close(Some(ws::CloseCode::Normal.into())))
.await
.unwrap();
let (item, _) = framed.into_future().await;
assert_eq!(
item.unwrap().unwrap(),
ws::Frame::Close(Some(ws::CloseCode::Normal.into()))
);
}

actix-http/.appveyor.yml Normal file

@ -0,0 +1,41 @@
environment:
global:
PROJECT_NAME: actix-http
matrix:
# Stable channel
- TARGET: i686-pc-windows-msvc
CHANNEL: stable
- TARGET: x86_64-pc-windows-gnu
CHANNEL: stable
- TARGET: x86_64-pc-windows-msvc
CHANNEL: stable
# Nightly channel
- TARGET: i686-pc-windows-msvc
CHANNEL: nightly
- TARGET: x86_64-pc-windows-gnu
CHANNEL: nightly
- TARGET: x86_64-pc-windows-msvc
CHANNEL: nightly
# Install Rust and Cargo
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
install:
- ps: >-
If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
$Env:PATH += ';C:\msys64\mingw64\bin'
} ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
$Env:PATH += ';C:\MinGW\bin'
}
- curl -sSf -o rustup-init.exe https://win.rustup.rs
- rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
- rustc -Vv
- cargo -V
# 'cargo test' takes care of building for us, so disable Appveyor's build stage.
build: false
# Equivalent to Travis' `script` phase
test_script:
- cargo clean
- cargo test

actix-http/CHANGES.md Normal file

@ -0,0 +1,259 @@
# Changes
## [0.2.11] - 2019-11-06
### Added
* Add support for serde_json::Value to be passed as argument to ResponseBuilder.body()
* Add an additional `filename*` param in the `Content-Disposition` header of `actix_files::NamedFile` to be more compatible. (#1151)
* Allow to use `std::convert::Infallible` as `actix_http::error::Error`
### Fixed
* To be compatible with non-English error responses, `ResponseError` is rendered with a `text/plain; charset=utf-8` header #1118
## [0.2.10] - 2019-09-11
### Added
* Add support for sending HTTP requests with `Rc<RequestHead>` in addition to sending HTTP requests with `RequestHead`
### Fixed
* h2 will use error response #1080
* on_connect result isn't added to request extensions for http2 requests #1009
## [0.2.9] - 2019-08-13
### Changed
* Dropped the `byteorder`-dependency in favor of `stdlib`-implementation
* Update percent-encoding to 2.1
* Update serde_urlencoded to 0.6.1
### Fixed
* Fixed a panic in the HTTP2 handshake in client HTTP requests (#1031)
## [0.2.8] - 2019-08-01
### Added
* Add `rustls` support
* Add `Clone` impl for `HeaderMap`
### Fixed
* awc client panic #1016
* Invalid response with compression middleware enabled, but compression-related features disabled #997
## [0.2.7] - 2019-07-18
### Added
* Add support for downcasting response errors #986
## [0.2.6] - 2019-07-17
### Changed
* Replace `ClonableService` with local copy
* Upgrade `rand` dependency version to 0.7
## [0.2.5] - 2019-06-28
### Added
* Add `on-connect` callback, `HttpServiceBuilder::on_connect()` #946
### Changed
* Use `encoding_rs` crate instead of unmaintained `encoding` crate
* Add `Copy` and `Clone` impls for `ws::Codec`
## [0.2.4] - 2019-06-16
### Fixed
* Do not compress NoContent (204) responses #918
## [0.2.3] - 2019-06-02
### Added
* Debug impl for ResponseBuilder
* From SizedStream and BodyStream for Body
### Changed
* SizedStream uses u64
## [0.2.2] - 2019-05-29
### Fixed
* Parse incoming stream before closing stream on disconnect #868
## [0.2.1] - 2019-05-25
### Fixed
* Handle socket read disconnect
## [0.2.0] - 2019-05-12
### Changed
* Update actix-service to 0.4
* Expect and upgrade services accept `ServerConfig` config.
### Deleted
* `OneRequest` service
## [0.1.5] - 2019-05-04
### Fixed
* Clean up response extensions in response pool #817
## [0.1.4] - 2019-04-24
### Added
* Allow to render h1 request headers in `Camel-Case`
### Fixed
* Read until eof for http/1.0 responses #771
## [0.1.3] - 2019-04-23
### Fixed
* Fix http client pool management
* Fix http client wait queue management #794
## [0.1.2] - 2019-04-23
### Fixed
* Fix BorrowMutError panic in client connector #793
## [0.1.1] - 2019-04-19
### Changed
* Cookie::max_age() accepts value in seconds
* Cookie::max_age_time() accepts value in time::Duration
* Allow to specify server address for client connector
## [0.1.0] - 2019-04-16
### Added
* Expose peer addr via `Request::peer_addr()` and `RequestHead::peer_addr`
### Changed
* `actix_http::encoding` always available
* use trust-dns-resolver 0.11.0
## [0.1.0-alpha.5] - 2019-04-12
### Added
* Allow to use custom service for upgrade requests
* Added `h1::SendResponse` future.
### Changed
* MessageBody::length() renamed to MessageBody::size() for consistency
* ws handshake verification functions take RequestHead instead of Request
## [0.1.0-alpha.4] - 2019-04-08
### Added
* Allow to use custom `Expect` handler
* Add minimal `std::error::Error` impl for `Error`
### Changed
* Export IntoHeaderValue
* Render error and return as response body
* Use thread pool for response body compression
### Deleted
* Removed PayloadBuffer
## [0.1.0-alpha.3] - 2019-04-02
### Added
* Warn when an unsealed private cookie isn't valid UTF-8
### Fixed
* Rust 1.31.0 compatibility
* Preallocate read buffer for h1 codec
* Detect socket disconnection during protocol selection
## [0.1.0-alpha.2] - 2019-03-29
### Added
* Added ws::Message::Nop, no-op websockets message
### Changed
* Do not use thread pool for decompression if chunk size is smaller than 2048.
## [0.1.0-alpha.1] - 2019-03-28
* Initial impl


@ -0,0 +1,46 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

actix-http/Cargo.toml

@ -0,0 +1,112 @@
[package]
name = "actix-http"
version = "0.3.0-alpha.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix http primitives"
readme = "README.md"
keywords = ["actix", "http", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-http/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
license = "MIT/Apache-2.0"
edition = "2018"
workspace = ".."
[package.metadata.docs.rs]
features = ["openssl", "fail", "brotli", "flate2-zlib", "secure-cookies"]
[lib]
name = "actix_http"
path = "src/lib.rs"
[features]
default = []
# openssl
openssl = ["open-ssl", "actix-connect/openssl", "tokio-openssl"]
# rustls support
# rustls = ["rust-tls", "webpki-roots", "actix-connect/rustls"]
# brotli encoding, requires c compiler
brotli = ["brotli2"]
# miniz-sys backend for flate2 crate
flate2-zlib = ["flate2/miniz-sys"]
# rust backend for flate2 crate
flate2-rust = ["flate2/rust_backend"]
# failure integration. actix does not use failure anymore
fail = ["failure"]
# support for secure cookies
secure-cookies = ["ring"]
[dependencies]
actix-service = "1.0.0-alpha.1"
actix-codec = "0.2.0-alpha.1"
actix-connect = "1.0.0-alpha.1"
actix-utils = "0.5.0-alpha.1"
actix-server-config = "0.3.0-alpha.1"
actix-rt = "1.0.0-alpha.1"
actix-threadpool = "0.2.0-alpha.1"
base64 = "0.10"
bitflags = "1.0"
bytes = "0.4"
copyless = "0.1.4"
chrono = "0.4.6"
derive_more = "0.99.2"
either = "1.5.2"
encoding_rs = "0.8"
futures = "0.3.1"
hashbrown = "0.6.3"
h2 = "0.2.0-alpha.3"
http = "0.1.17"
httparse = "1.3"
indexmap = "1.2"
lazy_static = "1.0"
language-tags = "0.2"
log = "0.4"
mime = "0.3"
percent-encoding = "2.1"
pin-project = "0.4.5"
rand = "0.7"
regex = "1.0"
serde = "1.0"
serde_json = "1.0"
sha1 = "0.6"
slab = "0.4"
serde_urlencoded = "0.6.1"
time = "0.1.42"
tokio-net = "=0.2.0-alpha.6"
trust-dns-resolver = { version="0.18.0-alpha.1", default-features = false }
# for secure cookie
ring = { version = "0.16.9", optional = true }
# compression
brotli2 = { version="0.3.2", optional = true }
flate2 = { version="1.0.7", optional = true, default-features = false }
# optional deps
failure = { version = "0.1.5", optional = true }
open-ssl = { version="0.10", package="openssl", optional = true }
tokio-openssl = { version = "0.4.0-alpha.6", optional = true }
# rust-tls = { version = "0.16.0", package="rustls", optional = true }
# webpki-roots = { version = "0.18", optional = true }
[dev-dependencies]
#actix-server = { version = "0.8.0-alpha.1", features=["openssl", "rustls"] }
actix-server = { version = "0.8.0-alpha.1", features=["openssl"] }
actix-connect = { version = "1.0.0-alpha.1", features=["openssl"] }
actix-http-test = { version = "0.3.0-alpha.1", features=["openssl"] }
env_logger = "0.6"
serde_derive = "1.0"
open-ssl = { version="0.10", package="openssl" }

actix-http/LICENSE-APACHE

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-NOW Nikolay Kim
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

actix-http/LICENSE-MIT

@ -0,0 +1,25 @@
Copyright (c) 2017 Nikolay Kim
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

actix-http/README.md

@ -0,0 +1,46 @@
# Actix http [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-http)](https://crates.io/crates/actix-http) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
Actix http
## Documentation & community resources
* [User Guide](https://actix.rs/docs/)
* [API Documentation](https://docs.rs/actix-http/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-http](https://crates.io/crates/actix-http)
* Minimum supported Rust version: 1.31 or later
## Example
```rust
// see examples/framed_hello.rs for complete list of used crates.
extern crate actix_http;
use actix_http::{h1, Response, ServiceConfig};
fn main() {
Server::new().bind("framed_hello", "127.0.0.1:8080", || {
IntoFramed::new(|| h1::Codec::new(ServiceConfig::default())) // <- create h1 codec
.and_then(TakeItem::new().map_err(|_| ())) // <- read one request
.and_then(|(_req, _framed): (_, Framed<_, _>)| { // <- send response and close conn
SendResponse::send(_framed, Response::Ok().body("Hello world!"))
.map_err(|_| ())
.map(|_| ())
})
}).unwrap().run();
}
```
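The snippet above is abridged and targets the framed `h1` codec API. A minimal sketch using the `HttpService` builder is shown below; it is adapted from the hello-world example included later in this tree, so the exact signatures track the alpha crate versions pinned in `Cargo.toml` and may shift between releases.

```rust
use std::io;

use actix_http::{HttpService, Response};
use actix_server::Server;
use futures::future;

fn main() -> io::Result<()> {
    Server::build()
        // Bind a named service to a local address.
        .bind("hello-world", "127.0.0.1:8080", || {
            // Answer every request with a static body.
            HttpService::build()
                .finish(|_req| future::ok::<_, ()>(Response::Ok().body("Hello world!")))
        })?
        .run()
}
```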
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option.
## Code of Conduct
Contribution to the actix-http crate is organized under the terms of the
Contributor Covenant. The maintainer of actix-http, @fafhrd91, promises to
intervene to uphold that code of conduct.


@ -0,0 +1,39 @@
use std::{env, io};
use actix_http::{Error, HttpService, Request, Response};
use actix_server::Server;
use bytes::BytesMut;
use futures::StreamExt;
use http::header::HeaderValue;
use log::info;
fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "echo=info");
env_logger::init();
Server::build()
.bind("echo", "127.0.0.1:8080", || {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
.finish(|mut req: Request| {
async move {
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?);
}
info!("request body: {:?}", body);
Ok::<_, Error>(
Response::Ok()
.header(
"x-head",
HeaderValue::from_static("dummy value!"),
)
.body(body),
)
}
})
})?
.run()
}


@ -0,0 +1,31 @@
use std::{env, io};
use actix_http::http::HeaderValue;
use actix_http::{Error, HttpService, Request, Response};
use actix_server::Server;
use bytes::BytesMut;
use futures::StreamExt;
use log::info;
async fn handle_request(mut req: Request) -> Result<Response, Error> {
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?)
}
info!("request body: {:?}", body);
Ok(Response::Ok()
.header("x-head", HeaderValue::from_static("dummy value!"))
.body(body))
}
fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "echo=info");
env_logger::init();
Server::build()
.bind("echo", "127.0.0.1:8080", || {
HttpService::build().finish(handle_request)
})?
.run()
}


@ -0,0 +1,26 @@
use std::{env, io};
use actix_http::{HttpService, Response};
use actix_server::Server;
use futures::future;
use http::header::HeaderValue;
use log::info;
fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "hello_world=info");
env_logger::init();
Server::build()
.bind("hello-world", "127.0.0.1:8080", || {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
.finish(|_req| {
info!("{:?}", _req);
let mut res = Response::Ok();
res.header("x-head", HeaderValue::from_static("dummy value!"));
future::ok::<_, ()>(res.body("Hello world!"))
})
})?
.run()
}

actix-http/rustfmt.toml

@ -0,0 +1,5 @@
max_width = 89
reorder_imports = true
#wrap_comments = true
#fn_args_density = "Compressed"
#use_small_heuristics = false

actix-http/src/body.rs

@ -0,0 +1,589 @@
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, mem};
use bytes::{Bytes, BytesMut};
use futures::Stream;
use pin_project::{pin_project, project};
use crate::error::Error;
#[derive(Debug, PartialEq, Copy, Clone)]
/// Body size hint
pub enum BodySize {
None,
Empty,
Sized(usize),
Sized64(u64),
Stream,
}
impl BodySize {
pub fn is_eof(&self) -> bool {
match self {
BodySize::None
| BodySize::Empty
| BodySize::Sized(0)
| BodySize::Sized64(0) => true,
_ => false,
}
}
}
/// A type that implements this trait can be streamed to a peer.
pub trait MessageBody {
fn size(&self) -> BodySize;
fn poll_next(&mut self, cx: &mut Context) -> Poll<Option<Result<Bytes, Error>>>;
}
impl MessageBody for () {
fn size(&self) -> BodySize {
BodySize::Empty
}
fn poll_next(&mut self, _: &mut Context) -> Poll<Option<Result<Bytes, Error>>> {
Poll::Ready(None)
}
}
impl<T: MessageBody> MessageBody for Box<T> {
fn size(&self) -> BodySize {
self.as_ref().size()
}
fn poll_next(&mut self, cx: &mut Context) -> Poll<Option<Result<Bytes, Error>>> {
self.as_mut().poll_next(cx)
}
}
#[pin_project]
pub enum ResponseBody<B> {
Body(B),
Other(Body),
}
impl ResponseBody<Body> {
pub fn into_body<B>(self) -> ResponseBody<B> {
match self {
ResponseBody::Body(b) => ResponseBody::Other(b),
ResponseBody::Other(b) => ResponseBody::Other(b),
}
}
}
impl<B> ResponseBody<B> {
pub fn take_body(&mut self) -> ResponseBody<B> {
std::mem::replace(self, ResponseBody::Other(Body::None))
}
}
impl<B: MessageBody> ResponseBody<B> {
pub fn as_ref(&self) -> Option<&B> {
if let ResponseBody::Body(ref b) = self {
Some(b)
} else {
None
}
}
}
impl<B: MessageBody> MessageBody for ResponseBody<B> {
fn size(&self) -> BodySize {
match self {
ResponseBody::Body(ref body) => body.size(),
ResponseBody::Other(ref body) => body.size(),
}
}
fn poll_next(&mut self, cx: &mut Context) -> Poll<Option<Result<Bytes, Error>>> {
match self {
ResponseBody::Body(ref mut body) => body.poll_next(cx),
ResponseBody::Other(ref mut body) => body.poll_next(cx),
}
}
}
impl<B: MessageBody> Stream for ResponseBody<B> {
type Item = Result<Bytes, Error>;
#[project]
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
#[project]
match self.project() {
ResponseBody::Body(ref mut body) => body.poll_next(cx),
ResponseBody::Other(ref mut body) => body.poll_next(cx),
}
}
}
/// Represents various types of http message body.
pub enum Body {
/// Empty response. `Content-Length` header is not set.
None,
/// Zero sized response body. `Content-Length` header is set to `0`.
Empty,
/// Specific response body.
Bytes(Bytes),
/// Generic message body.
Message(Box<dyn MessageBody>),
}
impl Body {
/// Create body from slice (copy)
pub fn from_slice(s: &[u8]) -> Body {
Body::Bytes(Bytes::from(s))
}
/// Create body from generic message body.
pub fn from_message<B: MessageBody + 'static>(body: B) -> Body {
Body::Message(Box::new(body))
}
}
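// Editor's note: a hedged usage sketch, not part of the original file. Handlers
// usually construct a `Body` through the `From` conversions defined below rather
// than naming the enum variants directly:
//
//     let a: Body = "Hello world!".into();           // via From<&'static str>
//     let b: Body = Body::from_slice(b"raw bytes");  // copies the slice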
impl MessageBody for Body {
fn size(&self) -> BodySize {
match self {
Body::None => BodySize::None,
Body::Empty => BodySize::Empty,
Body::Bytes(ref bin) => BodySize::Sized(bin.len()),
Body::Message(ref body) => body.size(),
}
}
fn poll_next(&mut self, cx: &mut Context) -> Poll<Option<Result<Bytes, Error>>> {
match self {
Body::None => Poll::Ready(None),
Body::Empty => Poll::Ready(None),
Body::Bytes(ref mut bin) => {
let len = bin.len();
if len == 0 {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::replace(bin, Bytes::new()))))
}
}
Body::Message(ref mut body) => body.poll_next(cx),
}
}
}
impl PartialEq for Body {
fn eq(&self, other: &Body) -> bool {
match *self {
Body::None => match *other {
Body::None => true,
_ => false,
},
Body::Empty => match *other {
Body::Empty => true,
_ => false,
},
Body::Bytes(ref b) => match *other {
Body::Bytes(ref b2) => b == b2,
_ => false,
},
Body::Message(_) => false,
}
}
}
impl fmt::Debug for Body {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Body::None => write!(f, "Body::None"),
Body::Empty => write!(f, "Body::Empty"),
Body::Bytes(ref b) => write!(f, "Body::Bytes({:?})", b),
Body::Message(_) => write!(f, "Body::Message(_)"),
}
}
}
impl From<&'static str> for Body {
fn from(s: &'static str) -> Body {
Body::Bytes(Bytes::from_static(s.as_ref()))
}
}
impl From<&'static [u8]> for Body {
fn from(s: &'static [u8]) -> Body {
Body::Bytes(Bytes::from_static(s))
}
}
impl From<Vec<u8>> for Body {
fn from(vec: Vec<u8>) -> Body {
Body::Bytes(Bytes::from(vec))
}
}
impl From<String> for Body {
fn from(s: String) -> Body {
s.into_bytes().into()
}
}
impl<'a> From<&'a String> for Body {
fn from(s: &'a String) -> Body {
Body::Bytes(Bytes::from(AsRef::<[u8]>::as_ref(&s)))
}
}
impl From<Bytes> for Body {
fn from(s: Bytes) -> Body {
Body::Bytes(s)
}
}
impl From<BytesMut> for Body {
fn from(s: BytesMut) -> Body {
Body::Bytes(s.freeze())
}
}
impl From<serde_json::Value> for Body {
fn from(v: serde_json::Value) -> Body {
Body::Bytes(v.to_string().into())
}
}
impl<S> From<SizedStream<S>> for Body
where
S: Stream<Item = Result<Bytes, Error>> + 'static,
{
fn from(s: SizedStream<S>) -> Body {
Body::from_message(s)
}
}
impl<S, E> From<BodyStream<S, E>> for Body
where
S: Stream<Item = Result<Bytes, E>> + 'static,
E: Into<Error> + 'static,
{
fn from(s: BodyStream<S, E>) -> Body {
Body::from_message(s)
}
}
impl MessageBody for Bytes {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self, _: &mut Context) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::replace(self, Bytes::new()))))
}
}
}
impl MessageBody for BytesMut {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self, _: &mut Context) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::replace(self, BytesMut::new()).freeze())))
}
}
}
impl MessageBody for &'static str {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self, _: &mut Context) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(
mem::replace(self, "").as_ref(),
))))
}
}
}
impl MessageBody for &'static [u8] {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self, _: &mut Context) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(mem::replace(self, b"")))))
}
}
}
impl MessageBody for Vec<u8> {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self, _: &mut Context) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(mem::replace(self, Vec::new())))))
}
}
}
impl MessageBody for String {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self, _: &mut Context) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(
mem::replace(self, String::new()).into_bytes(),
))))
}
}
}
/// Type representing a streaming body.
/// The response does not contain a `content-length` header; an appropriate transfer encoding is used.
#[pin_project]
pub struct BodyStream<S, E> {
#[pin]
stream: S,
_t: PhantomData<E>,
}
impl<S, E> BodyStream<S, E>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Error>,
{
pub fn new(stream: S) -> Self {
BodyStream {
stream,
_t: PhantomData,
}
}
}
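// Editor's note: a hedged usage sketch, not part of the original file. Any
// `Stream<Item = Result<Bytes, E>>` with `E: Into<Error>` can be wrapped and then
// converted into a `Body` through the `From<BodyStream<S, E>>` impl above:
//
//     let stream = futures::stream::once(async { Ok::<_, Error>(Bytes::from_static(b"data")) });
//     let body: Body = BodyStream::new(stream).into();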
impl<S, E> MessageBody for BodyStream<S, E>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Error>,
{
fn size(&self) -> BodySize {
BodySize::Stream
}
fn poll_next(&mut self, cx: &mut Context) -> Poll<Option<Result<Bytes, Error>>> {
unsafe { Pin::new_unchecked(self) }
.project()
.stream
.poll_next(cx)
.map(|res| res.map(|res| res.map_err(std::convert::Into::into)))
}
}
/// Type representing a streaming body. This body implementation should be used
/// if the total size of the stream is known. Data is sent as-is, without using a transfer encoding.
#[pin_project]
pub struct SizedStream<S> {
size: u64,
#[pin]
stream: S,
}
impl<S> SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>>,
{
pub fn new(size: u64, stream: S) -> Self {
SizedStream { size, stream }
}
}
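// Editor's note: a hedged usage sketch, not part of the original file. A
// `SizedStream` advertises its total size via `BodySize::Sized64`, so the payload
// is sent as-is instead of using a transfer encoding; the stream below is
// illustrative only:
//
//     let stream = futures::stream::once(async { Ok::<_, Error>(Bytes::from_static(b"hello world")) });
//     let body: Body = SizedStream::new(11, stream).into();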
impl<S> MessageBody for SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>>,
{
fn size(&self) -> BodySize {
BodySize::Sized64(self.size)
}
fn poll_next(&mut self, cx: &mut Context) -> Poll<Option<Result<Bytes, Error>>> {
unsafe { Pin::new_unchecked(self) }
.project()
.stream
.poll_next(cx)
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::future::poll_fn;
impl Body {
pub(crate) fn get_ref(&self) -> &[u8] {
match *self {
Body::Bytes(ref bin) => &bin,
_ => panic!(),
}
}
}
impl ResponseBody<Body> {
pub(crate) fn get_ref(&self) -> &[u8] {
match *self {
ResponseBody::Body(ref b) => b.get_ref(),
ResponseBody::Other(ref b) => b.get_ref(),
}
}
}
#[actix_rt::test]
async fn test_static_str() {
assert_eq!(Body::from("").size(), BodySize::Sized(0));
assert_eq!(Body::from("test").size(), BodySize::Sized(4));
assert_eq!(Body::from("test").get_ref(), b"test");
assert_eq!("test".size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| "test".poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_static_bytes() {
assert_eq!(Body::from(b"test".as_ref()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b"test".as_ref()).get_ref(), b"test");
assert_eq!(
Body::from_slice(b"test".as_ref()).size(),
BodySize::Sized(4)
);
assert_eq!(Body::from_slice(b"test".as_ref()).get_ref(), b"test");
assert_eq!((&b"test"[..]).size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| (&b"test"[..]).poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_vec() {
assert_eq!(Body::from(Vec::from("test")).size(), BodySize::Sized(4));
assert_eq!(Body::from(Vec::from("test")).get_ref(), b"test");
assert_eq!(Vec::from("test").size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| Vec::from("test").poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_bytes() {
let mut b = Bytes::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_bytes_mut() {
let mut b = BytesMut::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_string() {
let mut b = "test".to_owned();
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
assert_eq!(Body::from(&b).size(), BodySize::Sized(4));
assert_eq!(Body::from(&b).get_ref(), b"test");
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_unit() {
assert_eq!(().size(), BodySize::Empty);
assert!(poll_fn(|cx| ().poll_next(cx)).await.is_none());
}
#[actix_rt::test]
async fn test_box() {
let mut val = Box::new(());
assert_eq!(val.size(), BodySize::Empty);
assert!(poll_fn(|cx| val.poll_next(cx)).await.is_none());
}
#[actix_rt::test]
async fn test_body_eq() {
assert!(Body::None == Body::None);
assert!(Body::None != Body::Empty);
assert!(Body::Empty == Body::Empty);
assert!(Body::Empty != Body::None);
assert!(
Body::Bytes(Bytes::from_static(b"1"))
== Body::Bytes(Bytes::from_static(b"1"))
);
assert!(Body::Bytes(Bytes::from_static(b"1")) != Body::None);
}
#[actix_rt::test]
async fn test_body_debug() {
assert!(format!("{:?}", Body::None).contains("Body::None"));
assert!(format!("{:?}", Body::Empty).contains("Body::Empty"));
assert!(format!("{:?}", Body::Bytes(Bytes::from_static(b"1"))).contains("1"));
}
#[actix_rt::test]
async fn test_serde_json() {
use serde_json::json;
assert_eq!(
Body::from(serde_json::Value::String("test".into())).size(),
BodySize::Sized(6)
);
assert_eq!(
Body::from(json!({"test-key":"test-value"})).size(),
BodySize::Sized(25)
);
}
}

actix-http/src/builder.rs

@ -0,0 +1,230 @@
use std::fmt;
use std::marker::PhantomData;
use std::rc::Rc;
use actix_codec::Framed;
use actix_server_config::ServerConfig as SrvConfig;
use actix_service::{IntoServiceFactory, Service, ServiceFactory};
use crate::body::MessageBody;
use crate::config::{KeepAlive, ServiceConfig};
use crate::error::Error;
use crate::h1::{Codec, ExpectHandler, H1Service, UpgradeHandler};
use crate::h2::H2Service;
use crate::helpers::{Data, DataFactory};
use crate::request::Request;
use crate::response::Response;
use crate::service::HttpService;
/// An http service builder
///
/// This type can be used to construct an instance of `http service` through a
/// builder-like pattern.
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler<T>> {
keep_alive: KeepAlive,
client_timeout: u64,
client_disconnect: u64,
expect: X,
upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
_t: PhantomData<(T, S)>,
}
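// Editor's note: a hedged usage sketch, not part of the original file. The builder
// is typically obtained via `HttpService::build()` (as in the echo example earlier
// in this tree) and finished with `h1()`, `h2()` or `finish()`:
//
//     HttpService::build()
//         .client_timeout(1000)
//         .client_disconnect(1000)
//         .finish(|_req: Request| async { Ok::<_, Error>(Response::Ok().body("Hello world!")) });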
impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler<T>>
where
S: ServiceFactory<Config = SrvConfig, Request = Request>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
<S::Service as Service>::Future: 'static,
{
/// Create instance of `HttpServiceBuilder`
pub fn new() -> Self {
HttpServiceBuilder {
keep_alive: KeepAlive::Timeout(5),
client_timeout: 5000,
client_disconnect: 0,
expect: ExpectHandler,
upgrade: None,
on_connect: None,
_t: PhantomData,
}
}
}
impl<T, S, X, U> HttpServiceBuilder<T, S, X, U>
where
S: ServiceFactory<Config = SrvConfig, Request = Request>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
<S::Service as Service>::Future: 'static,
X: ServiceFactory<Config = SrvConfig, Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
<X::Service as Service>::Future: 'static,
U: ServiceFactory<
Config = SrvConfig,
Request = (Request, Framed<T, Codec>),
Response = (),
>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
<U::Service as Service>::Future: 'static,
{
/// Set the server keep-alive setting.
///
/// By default keep-alive is set to 5 seconds.
pub fn keep_alive<W: Into<KeepAlive>>(mut self, val: W) -> Self {
self.keep_alive = val.into();
self
}
/// Set the server client timeout in milliseconds for the first request.
///
/// Defines a timeout for reading the client request header. If a client does not transmit
/// the entire set of headers within this time, the request is terminated with
/// a 408 (Request Time-out) error.
///
/// To disable the timeout, set the value to 0.
///
/// By default the client timeout is set to 5000 milliseconds.
pub fn client_timeout(mut self, val: u64) -> Self {
self.client_timeout = val;
self
}
/// Set server connection disconnect timeout in milliseconds.
///
/// Defines a timeout for the connection disconnect procedure. If the procedure does not complete
/// within this time, the request gets dropped. This timeout affects secure connections.
///
/// To disable the timeout, set the value to 0.
///
/// By default the disconnect timeout is set to 0.
pub fn client_disconnect(mut self, val: u64) -> Self {
self.client_disconnect = val;
self
}
/// Provide a service for `EXPECT: 100-Continue` support.
///
/// The service is called for requests that contain an `EXPECT` header.
/// On success the service must return the request, which is then
/// forwarded to the main service.
pub fn expect<F, X1>(self, expect: F) -> HttpServiceBuilder<T, S, X1, U>
where
F: IntoServiceFactory<X1>,
X1: ServiceFactory<Config = SrvConfig, Request = Request, Response = Request>,
X1::Error: Into<Error>,
X1::InitError: fmt::Debug,
<X1::Service as Service>::Future: 'static,
{
HttpServiceBuilder {
keep_alive: self.keep_alive,
client_timeout: self.client_timeout,
client_disconnect: self.client_disconnect,
expect: expect.into_factory(),
upgrade: self.upgrade,
on_connect: self.on_connect,
_t: PhantomData,
}
}
/// Provide a service for custom `Connection: UPGRADE` support.
///
/// If a service is provided, normal request handling is halted and this
/// service is called with the original request and the framed object.
pub fn upgrade<F, U1>(self, upgrade: F) -> HttpServiceBuilder<T, S, X, U1>
where
F: IntoServiceFactory<U1>,
U1: ServiceFactory<
Config = SrvConfig,
Request = (Request, Framed<T, Codec>),
Response = (),
>,
U1::Error: fmt::Display,
U1::InitError: fmt::Debug,
<U1::Service as Service>::Future: 'static,
{
HttpServiceBuilder {
keep_alive: self.keep_alive,
client_timeout: self.client_timeout,
client_disconnect: self.client_disconnect,
expect: self.expect,
upgrade: Some(upgrade.into_factory()),
on_connect: self.on_connect,
_t: PhantomData,
}
}
/// Set the on-connect callback.
///
/// It is called once per connection; the result of the call is
/// stored in the request's extensions.
pub fn on_connect<F, I>(mut self, f: F) -> Self
where
F: Fn(&T) -> I + 'static,
I: Clone + 'static,
{
self.on_connect = Some(Rc::new(move |io| Box::new(Data(f(io)))));
self
}
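// Editor's note: a hedged usage sketch, not part of the original file. The callback
// receives a reference to the raw IO type `T` and its cloneable return value is
// stored in the request's extensions; capturing the peer address is only an
// illustration, and `my_service` stands in for any request handler:
//
//     HttpService::build()
//         .on_connect(|io: &TcpStream| io.peer_addr().ok())
//         .finish(my_service);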
/// Finish service configuration and create *http service* for HTTP/1 protocol.
pub fn h1<F, P, B>(self, service: F) -> H1Service<T, P, S, B, X, U>
where
B: MessageBody,
F: IntoServiceFactory<S>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_timeout,
self.client_disconnect,
);
H1Service::with_config(cfg, service.into_factory())
.expect(self.expect)
.upgrade(self.upgrade)
.on_connect(self.on_connect)
}
/// Finish service configuration and create *http service* for HTTP/2 protocol.
pub fn h2<F, P, B>(self, service: F) -> H2Service<T, P, S, B>
where
B: MessageBody + 'static,
F: IntoServiceFactory<S>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_timeout,
self.client_disconnect,
);
H2Service::with_config(cfg, service.into_factory()).on_connect(self.on_connect)
}
/// Finish service configuration and create `HttpService` instance.
pub fn finish<F, P, B>(self, service: F) -> HttpService<T, P, S, B, X, U>
where
B: MessageBody + 'static,
F: IntoServiceFactory<S>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_timeout,
self.client_disconnect,
);
HttpService::with_config(cfg, service.into_factory())
.expect(self.expect)
.upgrade(self.upgrade)
.on_connect(self.on_connect)
}
}


@ -0,0 +1,293 @@
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use bytes::{Buf, Bytes};
use futures::future::{err, Either, Future, FutureExt, LocalBoxFuture, Ready};
use h2::client::SendRequest;
use pin_project::{pin_project, project};
use crate::body::MessageBody;
use crate::h1::ClientCodec;
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::Payload;
use super::error::SendRequestError;
use super::pool::{Acquired, Protocol};
use super::{h1proto, h2proto};
pub(crate) enum ConnectionType<Io> {
H1(Io),
H2(SendRequest<Bytes>),
}
pub trait Connection {
type Io: AsyncRead + AsyncWrite + Unpin;
type Future: Future<Output = Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol;
/// Send request and body
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
self,
head: H,
body: B,
) -> Self::Future;
type TunnelFuture: Future<
Output = Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>;
/// Send request, returns Response and Framed
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture;
}
pub(crate) trait ConnectionLifetime: AsyncRead + AsyncWrite + 'static {
/// Close connection
fn close(&mut self);
/// Release connection to the connection pool
fn release(&mut self);
}
#[doc(hidden)]
/// HTTP client connection
pub struct IoConnection<T> {
io: Option<ConnectionType<T>>,
created: time::Instant,
pool: Option<Acquired<T>>,
}
impl<T> fmt::Debug for IoConnection<T>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.io {
Some(ConnectionType::H1(ref io)) => write!(f, "H1Connection({:?})", io),
Some(ConnectionType::H2(_)) => write!(f, "H2Connection"),
None => write!(f, "Connection(Empty)"),
}
}
}
impl<T: AsyncRead + AsyncWrite + Unpin> IoConnection<T> {
pub(crate) fn new(
io: ConnectionType<T>,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> Self {
IoConnection {
pool,
created,
io: Some(io),
}
}
pub(crate) fn into_inner(self) -> (ConnectionType<T>, time::Instant) {
(self.io.unwrap(), self.created)
}
}
impl<T> Connection for IoConnection<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Io = T;
type Future =
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol {
match self.io {
Some(ConnectionType::H1(_)) => Protocol::Http1,
Some(ConnectionType::H2(_)) => Protocol::Http2,
None => Protocol::Http1,
}
}
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
mut self,
head: H,
body: B,
) -> Self::Future {
match self.io.take().unwrap() {
ConnectionType::H1(io) => {
h1proto::send_request(io, head.into(), body, self.created, self.pool)
.boxed_local()
}
ConnectionType::H2(io) => {
h2proto::send_request(io, head.into(), body, self.created, self.pool)
.boxed_local()
}
}
}
type TunnelFuture = Either<
LocalBoxFuture<
'static,
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>,
Ready<Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>>,
>;
/// Send request, returns Response and Framed
fn open_tunnel<H: Into<RequestHeadType>>(mut self, head: H) -> Self::TunnelFuture {
match self.io.take().unwrap() {
ConnectionType::H1(io) => {
Either::Left(h1proto::open_tunnel(io, head.into()).boxed_local())
}
ConnectionType::H2(io) => {
if let Some(mut pool) = self.pool.take() {
pool.release(IoConnection::new(
ConnectionType::H2(io),
self.created,
None,
));
}
Either::Right(err(SendRequestError::TunnelNotSupported))
}
}
}
}
#[allow(dead_code)]
pub(crate) enum EitherConnection<A, B> {
A(IoConnection<A>),
B(IoConnection<B>),
}
impl<A, B> Connection for EitherConnection<A, B>
where
A: AsyncRead + AsyncWrite + Unpin + 'static,
B: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Io = EitherIo<A, B>;
type Future =
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol {
match self {
EitherConnection::A(con) => con.protocol(),
EitherConnection::B(con) => con.protocol(),
}
}
fn send_request<RB: MessageBody + 'static, H: Into<RequestHeadType>>(
self,
head: H,
body: RB,
) -> Self::Future {
match self {
EitherConnection::A(con) => con.send_request(head, body),
EitherConnection::B(con) => con.send_request(head, body),
}
}
type TunnelFuture = LocalBoxFuture<
'static,
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>;
/// Send request, returns Response and Framed
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture {
match self {
EitherConnection::A(con) => con
.open_tunnel(head)
.map(|res| res.map(|(head, framed)| (head, framed.map_io(EitherIo::A))))
.boxed_local(),
EitherConnection::B(con) => con
.open_tunnel(head)
.map(|res| res.map(|(head, framed)| (head, framed.map_io(EitherIo::B))))
.boxed_local(),
}
}
}
#[pin_project]
pub enum EitherIo<A, B> {
A(#[pin] A),
B(#[pin] B),
}
impl<A, B> AsyncRead for EitherIo<A, B>
where
A: AsyncRead,
B: AsyncRead,
{
#[project]
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
#[project]
match self.project() {
EitherIo::A(val) => val.poll_read(cx, buf),
EitherIo::B(val) => val.poll_read(cx, buf),
}
}
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
match self {
EitherIo::A(ref val) => val.prepare_uninitialized_buffer(buf),
EitherIo::B(ref val) => val.prepare_uninitialized_buffer(buf),
}
}
}
impl<A, B> AsyncWrite for EitherIo<A, B>
where
A: AsyncWrite,
B: AsyncWrite,
{
#[project]
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context,
buf: &[u8],
) -> Poll<io::Result<usize>> {
#[project]
match self.project() {
EitherIo::A(val) => val.poll_write(cx, buf),
EitherIo::B(val) => val.poll_write(cx, buf),
}
}
#[project]
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
#[project]
match self.project() {
EitherIo::A(val) => val.poll_flush(cx),
EitherIo::B(val) => val.poll_flush(cx),
}
}
#[project]
fn poll_shutdown(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
#[project]
match self.project() {
EitherIo::A(val) => val.poll_shutdown(cx),
EitherIo::B(val) => val.poll_shutdown(cx),
}
}
#[project]
fn poll_write_buf<U: Buf>(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut U,
) -> Poll<Result<usize, io::Error>>
where
Self: Sized,
{
#[project]
match self.project() {
EitherIo::A(val) => val.poll_write_buf(cx, buf),
EitherIo::B(val) => val.poll_write_buf(cx, buf),
}
}
}


@ -0,0 +1,531 @@
use std::fmt;
use std::marker::PhantomData;
use std::time::Duration;
use actix_codec::{AsyncRead, AsyncWrite};
use actix_connect::{
default_connector, Connect as TcpConnect, Connection as TcpConnection,
};
use actix_service::{apply_fn, Service};
use actix_utils::timeout::{TimeoutError, TimeoutService};
use http::Uri;
use tokio_net::tcp::TcpStream;
use super::connection::Connection;
use super::error::ConnectError;
use super::pool::{ConnectionPool, Protocol};
use super::Connect;
#[cfg(feature = "openssl")]
use open_ssl::ssl::SslConnector as OpensslConnector;
#[cfg(feature = "rustls")]
use rust_tls::ClientConfig;
#[cfg(feature = "rustls")]
use std::sync::Arc;
#[cfg(any(feature = "openssl", feature = "rustls"))]
enum SslConnector {
#[cfg(feature = "openssl")]
Openssl(OpensslConnector),
#[cfg(feature = "rustls")]
Rustls(Arc<ClientConfig>),
}
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
type SslConnector = ();
/// Manages http client network connectivity.
///
/// The `Connector` type uses a builder-like combinator pattern for service
/// construction that finishes by calling the `.finish()` method.
///
/// ```rust,ignore
/// use std::time::Duration;
/// use actix_http::client::Connector;
///
/// let connector = Connector::new()
/// .timeout(Duration::from_secs(5))
/// .finish();
/// ```
pub struct Connector<T, U> {
connector: T,
timeout: Duration,
conn_lifetime: Duration,
conn_keep_alive: Duration,
disconnect_timeout: Duration,
limit: usize,
#[allow(dead_code)]
ssl: SslConnector,
_t: PhantomData<U>,
}
trait Io: AsyncRead + AsyncWrite + Unpin {}
impl<T: AsyncRead + AsyncWrite + Unpin> Io for T {}
impl Connector<(), ()> {
#[allow(clippy::new_ret_no_self)]
pub fn new() -> Connector<
impl Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, TcpStream>,
Error = actix_connect::ConnectError,
> + Clone,
TcpStream,
> {
let ssl = {
#[cfg(feature = "openssl")]
{
use open_ssl::ssl::SslMethod;
let mut ssl = OpensslConnector::builder(SslMethod::tls()).unwrap();
let _ = ssl
.set_alpn_protos(b"\x02h2\x08http/1.1")
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
SslConnector::Openssl(ssl.build())
}
#[cfg(all(not(feature = "openssl"), feature = "rustls"))]
{
let protos = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
let mut config = ClientConfig::new();
config.set_protocols(&protos);
config
.root_store
.add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS);
SslConnector::Rustls(Arc::new(config))
}
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
{}
};
Connector {
ssl,
connector: default_connector(),
timeout: Duration::from_secs(1),
conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15),
disconnect_timeout: Duration::from_millis(3000),
limit: 100,
_t: PhantomData,
}
}
}
impl<T, U> Connector<T, U> {
/// Use custom connector.
pub fn connector<T1, U1>(self, connector: T1) -> Connector<T1, U1>
where
U1: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
T1: Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, U1>,
Error = actix_connect::ConnectError,
> + Clone,
{
Connector {
connector,
timeout: self.timeout,
conn_lifetime: self.conn_lifetime,
conn_keep_alive: self.conn_keep_alive,
disconnect_timeout: self.disconnect_timeout,
limit: self.limit,
ssl: self.ssl,
_t: PhantomData,
}
}
}
impl<T, U> Connector<T, U>
where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
T: Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, U>,
Error = actix_connect::ConnectError,
> + Clone
+ 'static,
{
/// Connection timeout, i.e. the maximum time to connect to a remote host, including DNS name resolution.
/// Set to 1 second by default.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.timeout = timeout;
self
}
#[cfg(feature = "openssl")]
/// Use custom `SslConnector` instance.
pub fn ssl(mut self, connector: OpensslConnector) -> Self {
self.ssl = SslConnector::Openssl(connector);
self
}
#[cfg(feature = "rustls")]
pub fn rustls(mut self, connector: Arc<ClientConfig>) -> Self {
self.ssl = SslConnector::Rustls(connector);
self
}
/// Set total number of simultaneous connections per type of scheme.
///
/// If limit is 0, the connector has no limit.
/// The default limit is 100.
pub fn limit(mut self, limit: usize) -> Self {
self.limit = limit;
self
}
/// Set keep-alive period for opened connection.
///
/// The keep-alive period is the time between uses of a connection. If
/// the delay between repeated usages of the same connection
/// exceeds this period, the connection is closed.
/// The default keep-alive period is 15 seconds.
pub fn conn_keep_alive(mut self, dur: Duration) -> Self {
self.conn_keep_alive = dur;
self
}
/// Set max lifetime period for connection.
///
/// The connection lifetime is the maximum lifetime of any opened connection,
/// after which it is closed regardless of the keep-alive period.
/// The default lifetime period is 75 seconds.
pub fn conn_lifetime(mut self, dur: Duration) -> Self {
self.conn_lifetime = dur;
self
}
/// Set server connection disconnect timeout in milliseconds.
///
/// Defines a timeout for the connection disconnect procedure. If the procedure does not complete
/// within this time, the socket gets dropped. This timeout affects only secure connections.
///
/// To disable the timeout, set the value to 0.
///
/// By default the disconnect timeout is set to 3000 milliseconds.
pub fn disconnect_timeout(mut self, dur: Duration) -> Self {
self.disconnect_timeout = dur;
self
}
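// Editor's note: a hedged usage sketch, not part of the original file. The tuning
// methods above compose in any order before `finish()`:
//
//     let connector = Connector::new()
//         .timeout(Duration::from_secs(5))
//         .conn_keep_alive(Duration::from_secs(30))
//         .conn_lifetime(Duration::from_secs(120))
//         .limit(25)
//         .finish();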
/// Finish the configuration process and create the connector service.
/// The `Connector` builder always concludes by calling `finish()` last in
/// its combinator chain.
pub fn finish(
self,
) -> impl Service<Request = Connect, Response = impl Connection, Error = ConnectError>
+ Clone {
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
{
let connector = TimeoutService::new(
self.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
connector,
self.conn_lifetime,
self.conn_keep_alive,
None,
self.limit,
),
}
}
#[cfg(any(feature = "openssl", feature = "rustls"))]
{
const H2: &[u8] = b"h2";
#[cfg(feature = "openssl")]
use actix_connect::ssl::OpensslConnector;
#[cfg(feature = "rustls")]
use actix_connect::ssl::RustlsConnector;
use actix_service::{boxed::service, pipeline};
#[cfg(feature = "rustls")]
use rust_tls::Session;
let ssl_service = TimeoutService::new(
self.timeout,
pipeline(
apply_fn(self.connector.clone(), |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from),
)
.and_then(match self.ssl {
#[cfg(feature = "openssl")]
SslConnector::Openssl(ssl) => service(
OpensslConnector::service(ssl)
.map(|stream| {
let sock = stream.into_parts().0;
let h2 = sock
.ssl()
.selected_alpn_protocol()
.map(|protos| protos.windows(2).any(|w| w == H2))
.unwrap_or(false);
if h2 {
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
} else {
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
}
})
.map_err(ConnectError::from),
),
#[cfg(feature = "rustls")]
SslConnector::Rustls(ssl) => service(
RustlsConnector::service(ssl)
.map_err(ConnectError::from)
.map(|stream| {
let sock = stream.into_parts().0;
let h2 = sock
.get_ref()
.1
.get_alpn_protocol()
.map(|protos| protos.windows(2).any(|w| w == H2))
.unwrap_or(false);
if h2 {
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
} else {
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
}
}),
),
}),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
let tcp_service = TimeoutService::new(
self.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
tcp_service,
self.conn_lifetime,
self.conn_keep_alive,
None,
self.limit,
),
ssl_pool: ConnectionPool::new(
ssl_service,
self.conn_lifetime,
self.conn_keep_alive,
Some(self.disconnect_timeout),
self.limit,
),
}
}
}
}
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
mod connect_impl {
use std::task::{Context, Poll};
use futures::future::{err, Either, Ready};
use super::*;
use crate::client::connection::IoConnection;
pub(crate) struct InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
pub(crate) tcp_pool: ConnectionPool<T, Io>,
}
impl<T, Io> Clone for InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
fn clone(&self) -> Self {
InnerConnector {
tcp_pool: self.tcp_pool.clone(),
}
}
}
impl<T, Io> Service for InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = IoConnection<Io>;
type Error = ConnectError;
type Future = Either<
<ConnectionPool<T, Io> as Service>::Future,
Ready<Result<IoConnection<Io>, ConnectError>>,
>;
fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
self.tcp_pool.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
match req.uri.scheme_str() {
Some("https") | Some("wss") => {
Either::Right(err(ConnectError::SslIsNotSupported))
}
_ => Either::Left(self.tcp_pool.call(req)),
}
}
}
}
#[cfg(any(feature = "openssl", feature = "rustls"))]
mod connect_impl {
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures::future::Either;
use futures::ready;
use super::*;
use crate::client::connection::EitherConnection;
pub(crate) struct InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>,
{
pub(crate) tcp_pool: ConnectionPool<T1, Io1>,
pub(crate) ssl_pool: ConnectionPool<T2, Io2>,
}
impl<T1, T2, Io1, Io2> Clone for InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
fn clone(&self) -> Self {
InnerConnector {
tcp_pool: self.tcp_pool.clone(),
ssl_pool: self.ssl_pool.clone(),
}
}
}
impl<T1, T2, Io1, Io2> Service for InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = EitherConnection<Io1, Io2>;
type Error = ConnectError;
type Future = Either<
InnerConnectorResponseA<T1, Io1, Io2>,
InnerConnectorResponseB<T2, Io1, Io2>,
>;
fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
self.tcp_pool.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
match req.uri.scheme_str() {
Some("https") | Some("wss") => Either::Right(InnerConnectorResponseB {
fut: self.ssl_pool.call(req),
_t: PhantomData,
}),
_ => Either::Left(InnerConnectorResponseA {
fut: self.tcp_pool.call(req),
_t: PhantomData,
}),
}
}
}
#[pin_project::pin_project]
pub(crate) struct InnerConnectorResponseA<T, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
{
#[pin]
fut: <ConnectionPool<T, Io1> as Service>::Future,
_t: PhantomData<Io2>,
}
impl<T, Io1, Io2> Future for InnerConnectorResponseA<T, Io1, Io2>
where
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
Poll::Ready(
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
.map(|res| EitherConnection::A(res)),
)
}
}
#[pin_project::pin_project]
pub(crate) struct InnerConnectorResponseB<T, Io1, Io2>
where
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
#[pin]
fut: <ConnectionPool<T, Io2> as Service>::Future,
_t: PhantomData<Io1>,
}
impl<T, Io1, Io2> Future for InnerConnectorResponseB<T, Io1, Io2>
where
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
Poll::Ready(
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
.map(|res| EitherConnection::B(res)),
)
}
}
}


@ -0,0 +1,148 @@
use std::io;
use derive_more::{Display, From};
use trust_dns_resolver::error::ResolveError;
#[cfg(feature = "openssl")]
use open_ssl::ssl::{Error as SslError, HandshakeError};
use crate::error::{Error, ParseError, ResponseError};
use crate::http::{Error as HttpError, StatusCode};
/// A set of errors that can occur while connecting to an HTTP host
#[derive(Debug, Display, From)]
pub enum ConnectError {
/// SSL feature is not enabled
#[display(fmt = "SSL is not supported")]
SslIsNotSupported,
/// SSL error
#[cfg(feature = "openssl")]
#[display(fmt = "{}", _0)]
SslError(SslError),
/// Failed to resolve the hostname
#[display(fmt = "Failed resolving hostname: {}", _0)]
Resolver(ResolveError),
    /// No DNS records
    #[display(fmt = "No DNS records found for the input")]
NoRecords,
/// Http2 error
#[display(fmt = "{}", _0)]
H2(h2::Error),
/// Connecting took too long
#[display(fmt = "Timeout out while establishing connection")]
Timeout,
/// Connector has been disconnected
#[display(fmt = "Internal error: connector has been disconnected")]
Disconnected,
/// Unresolved host name
#[display(fmt = "Connector received `Connect` method with unresolved host")]
Unresolverd,
/// Connection io error
#[display(fmt = "{}", _0)]
Io(io::Error),
}
impl From<actix_connect::ConnectError> for ConnectError {
fn from(err: actix_connect::ConnectError) -> ConnectError {
match err {
actix_connect::ConnectError::Resolver(e) => ConnectError::Resolver(e),
actix_connect::ConnectError::NoRecords => ConnectError::NoRecords,
actix_connect::ConnectError::InvalidInput => panic!(),
actix_connect::ConnectError::Unresolverd => ConnectError::Unresolverd,
actix_connect::ConnectError::Io(e) => ConnectError::Io(e),
}
}
}
#[cfg(feature = "openssl")]
impl<T> From<HandshakeError<T>> for ConnectError {
fn from(err: HandshakeError<T>) -> ConnectError {
match err {
HandshakeError::SetupFailure(stack) => SslError::from(stack).into(),
HandshakeError::Failure(stream) => stream.into_error().into(),
HandshakeError::WouldBlock(stream) => stream.into_error().into(),
}
}
}
#[derive(Debug, Display, From)]
pub enum InvalidUrl {
#[display(fmt = "Missing url scheme")]
MissingScheme,
#[display(fmt = "Unknown url scheme")]
UnknownScheme,
#[display(fmt = "Missing host name")]
MissingHost,
#[display(fmt = "Url parse error: {}", _0)]
HttpError(http::Error),
}
/// A set of errors that can occur during request sending and response reading
#[derive(Debug, Display, From)]
pub enum SendRequestError {
/// Invalid URL
#[display(fmt = "Invalid URL: {}", _0)]
Url(InvalidUrl),
/// Failed to connect to host
#[display(fmt = "Failed to connect to host: {}", _0)]
Connect(ConnectError),
/// Error sending request
Send(io::Error),
/// Error parsing response
Response(ParseError),
/// Http error
#[display(fmt = "{}", _0)]
Http(HttpError),
/// Http2 error
#[display(fmt = "{}", _0)]
H2(h2::Error),
/// Response took too long
#[display(fmt = "Timeout out while waiting for response")]
Timeout,
/// Tunnels are not supported for http2 connection
#[display(fmt = "Tunnels are not supported for http2 connection")]
TunnelNotSupported,
/// Error sending request body
Body(Error),
}
/// Convert `SendRequestError` to a server `Response`
impl ResponseError for SendRequestError {
fn status_code(&self) -> StatusCode {
match *self {
SendRequestError::Connect(ConnectError::Timeout) => {
StatusCode::GATEWAY_TIMEOUT
}
SendRequestError::Connect(_) => StatusCode::BAD_REQUEST,
_ => StatusCode::INTERNAL_SERVER_ERROR,
}
}
}
/// A set of errors that can occur during freezing a request
#[derive(Debug, Display, From)]
pub enum FreezeRequestError {
/// Invalid URL
#[display(fmt = "Invalid URL: {}", _0)]
Url(InvalidUrl),
/// Http error
#[display(fmt = "{}", _0)]
Http(HttpError),
}
impl From<FreezeRequestError> for SendRequestError {
fn from(e: FreezeRequestError) -> Self {
match e {
FreezeRequestError::Url(e) => e.into(),
FreezeRequestError::Http(e) => e.into(),
}
}
}
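
// A minimal sketch (not part of the original module) showing how the conversions above
// surface to callers: a connect timeout is reported as 504, any other connect failure
// as 400. It relies only on the `ResponseError` impl defined in this file; the test
// module itself is illustrative.
#[cfg(test)]
mod response_error_sketch {
    use super::*;

    #[test]
    fn connect_errors_map_to_status_codes() {
        // Timeout while connecting -> 504 Gateway Timeout
        let err = SendRequestError::Connect(ConnectError::Timeout);
        assert_eq!(err.status_code(), StatusCode::GATEWAY_TIMEOUT);

        // Any other connect failure -> 400 Bad Request
        let err = SendRequestError::Connect(ConnectError::SslIsNotSupported);
        assert_eq!(err.status_code(), StatusCode::BAD_REQUEST);
    }
}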


@ -0,0 +1,284 @@
use std::io::Write;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{io, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use bytes::{BufMut, Bytes, BytesMut};
use futures::future::poll_fn;
use futures::{SinkExt, Stream, StreamExt};
use crate::error::PayloadError;
use crate::h1;
use crate::header::HeaderMap;
use crate::http::header::{IntoHeaderValue, HOST};
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::{Payload, PayloadStream};
use super::connection::{ConnectionLifetime, ConnectionType, IoConnection};
use super::error::{ConnectError, SendRequestError};
use super::pool::Acquired;
use crate::body::{BodySize, MessageBody};
pub(crate) async fn send_request<T, B>(
io: T,
mut head: RequestHeadType,
body: B,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> Result<(ResponseHead, Payload), SendRequestError>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
B: MessageBody,
{
// set request host header
if !head.as_ref().headers.contains_key(HOST)
&& !head.extra_headers().iter().any(|h| h.contains_key(HOST))
{
if let Some(host) = head.as_ref().uri.host() {
let mut wrt = BytesMut::with_capacity(host.len() + 5).writer();
let _ = match head.as_ref().uri.port_u16() {
None | Some(80) | Some(443) => write!(wrt, "{}", host),
Some(port) => write!(wrt, "{}:{}", host, port),
};
match wrt.get_mut().take().freeze().try_into() {
Ok(value) => match head {
RequestHeadType::Owned(ref mut head) => {
head.headers.insert(HOST, value)
}
RequestHeadType::Rc(_, ref mut extra_headers) => {
let headers = extra_headers.get_or_insert(HeaderMap::new());
headers.insert(HOST, value)
}
},
Err(e) => log::error!("Can not set HOST header {}", e),
}
}
}
let io = H1Connection {
created,
pool,
io: Some(io),
};
// create Framed and send request
let mut framed = Framed::new(io, h1::ClientCodec::default());
framed.send((head, body.size()).into()).await?;
// send request body
match body.size() {
BodySize::None | BodySize::Empty | BodySize::Sized(0) => (),
_ => send_body(body, &mut framed).await?,
};
// read response and init read body
let res = framed.into_future().await;
let (head, framed) = if let (Some(result), framed) = res {
let item = result.map_err(SendRequestError::from)?;
(item, framed)
} else {
return Err(SendRequestError::from(ConnectError::Disconnected));
};
match framed.get_codec().message_type() {
h1::MessageType::None => {
let force_close = !framed.get_codec().keepalive();
release_connection(framed, force_close);
Ok((head, Payload::None))
}
_ => {
let pl: PayloadStream = PlStream::new(framed).boxed_local();
Ok((head, pl.into()))
}
}
}
pub(crate) async fn open_tunnel<T>(
io: T,
head: RequestHeadType,
) -> Result<(ResponseHead, Framed<T, h1::ClientCodec>), SendRequestError>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
// create Framed and send request
let mut framed = Framed::new(io, h1::ClientCodec::default());
framed.send((head, BodySize::None).into()).await?;
// read response
if let (Some(result), framed) = framed.into_future().await {
let head = result.map_err(SendRequestError::from)?;
Ok((head, framed))
} else {
Err(SendRequestError::from(ConnectError::Disconnected))
}
}
/// send request body to the peer
pub(crate) async fn send_body<I, B>(
mut body: B,
framed: &mut Framed<I, h1::ClientCodec>,
) -> Result<(), SendRequestError>
where
I: ConnectionLifetime,
B: MessageBody,
{
let mut eof = false;
while !eof {
while !eof && !framed.is_write_buf_full() {
match poll_fn(|cx| body.poll_next(cx)).await {
Some(result) => {
framed.write(h1::Message::Chunk(Some(result?)))?;
}
None => {
eof = true;
framed.write(h1::Message::Chunk(None))?;
}
}
}
if !framed.is_write_buf_empty() {
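            // Backpressure handling: flush until the write buffer has room again. `Pending`
            // with a non-full buffer is treated as "good enough", so encoding of the next
            // chunk can continue without waiting for a complete flush.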
poll_fn(|cx| match framed.flush(cx) {
Poll::Ready(Ok(_)) => Poll::Ready(Ok(())),
Poll::Ready(Err(err)) => Poll::Ready(Err(err)),
Poll::Pending => {
if !framed.is_write_buf_full() {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
})
.await?;
}
}
SinkExt::flush(framed).await?;
Ok(())
}
#[doc(hidden)]
/// HTTP client connection
pub struct H1Connection<T> {
io: Option<T>,
created: time::Instant,
pool: Option<Acquired<T>>,
}
impl<T> ConnectionLifetime for H1Connection<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
/// Close connection
fn close(&mut self) {
if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() {
pool.close(IoConnection::new(
ConnectionType::H1(io),
self.created,
None,
));
}
}
}
/// Release this connection to the connection pool
fn release(&mut self) {
if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() {
pool.release(IoConnection::new(
ConnectionType::H1(io),
self.created,
None,
));
}
}
}
}
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncRead for H1Connection<T> {
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
self.io.as_ref().unwrap().prepare_uninitialized_buffer(buf)
}
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io.as_mut().unwrap()).poll_read(cx, buf)
}
}
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncWrite for H1Connection<T> {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io.as_mut().unwrap()).poll_write(cx, buf)
}
fn poll_flush(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
Pin::new(self.io.as_mut().unwrap()).poll_flush(cx)
}
fn poll_shutdown(
mut self: Pin<&mut Self>,
cx: &mut Context,
) -> Poll<Result<(), io::Error>> {
Pin::new(self.io.as_mut().unwrap()).poll_shutdown(cx)
}
}
pub(crate) struct PlStream<Io> {
framed: Option<Framed<Io, h1::ClientPayloadCodec>>,
}
impl<Io: ConnectionLifetime> PlStream<Io> {
fn new(framed: Framed<Io, h1::ClientCodec>) -> Self {
PlStream {
framed: Some(framed.map_codec(|codec| codec.into_payload_codec())),
}
}
}
impl<Io: ConnectionLifetime> Stream for PlStream<Io> {
type Item = Result<Bytes, PayloadError>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
match this.framed.as_mut().unwrap().next_item(cx)? {
Poll::Pending => Poll::Pending,
Poll::Ready(Some(chunk)) => {
if let Some(chunk) = chunk {
Poll::Ready(Some(Ok(chunk)))
} else {
let framed = this.framed.take().unwrap();
let force_close = !framed.get_codec().keepalive();
release_connection(framed, force_close);
Poll::Ready(None)
}
}
Poll::Ready(None) => Poll::Ready(None),
}
}
}
fn release_connection<T, U>(framed: Framed<T, U>, force_close: bool)
where
T: ConnectionLifetime,
{
let mut parts = framed.into_parts();
if !force_close && parts.read_buf.is_empty() && parts.write_buf.is_empty() {
parts.io.release()
} else {
parts.io.close()
}
}


@ -0,0 +1,184 @@
use std::time;
use actix_codec::{AsyncRead, AsyncWrite};
use bytes::Bytes;
use futures::future::poll_fn;
use h2::{client::SendRequest, SendStream};
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, TRANSFER_ENCODING};
use http::{request::Request, HttpTryFrom, Method, Version};
use crate::body::{BodySize, MessageBody};
use crate::header::HeaderMap;
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::Payload;
use super::connection::{ConnectionType, IoConnection};
use super::error::SendRequestError;
use super::pool::Acquired;
pub(crate) async fn send_request<T, B>(
mut io: SendRequest<Bytes>,
head: RequestHeadType,
body: B,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> Result<(ResponseHead, Payload), SendRequestError>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
B: MessageBody,
{
trace!("Sending client request: {:?} {:?}", head, body.size());
let head_req = head.as_ref().method == Method::HEAD;
let length = body.size();
let eof = match length {
BodySize::None | BodySize::Empty | BodySize::Sized(0) => true,
_ => false,
};
let mut req = Request::new(());
*req.uri_mut() = head.as_ref().uri.clone();
*req.method_mut() = head.as_ref().method.clone();
*req.version_mut() = Version::HTTP_2;
let mut skip_len = true;
// let mut has_date = false;
// Content length
let _ = match length {
BodySize::None => None,
BodySize::Stream => {
skip_len = false;
None
}
BodySize::Empty => req
.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0")),
BodySize::Sized(len) => req.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
BodySize::Sized64(len) => req.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
};
// Extracting extra headers from RequestHeadType. HeaderMap::new() does not allocate.
let (head, extra_headers) = match head {
RequestHeadType::Owned(head) => (RequestHeadType::Owned(head), HeaderMap::new()),
RequestHeadType::Rc(head, extra_headers) => (
RequestHeadType::Rc(head, None),
extra_headers.unwrap_or_else(HeaderMap::new),
),
};
// merging headers from head and extra headers.
let headers = head
.as_ref()
.headers
.iter()
.filter(|(name, _)| !extra_headers.contains_key(*name))
.chain(extra_headers.iter());
// copy headers
for (key, value) in headers {
match *key {
CONNECTION | TRANSFER_ENCODING => continue, // http2 specific
CONTENT_LENGTH if skip_len => continue,
// DATE => has_date = true,
_ => (),
}
req.headers_mut().append(key, value.clone());
}
let res = poll_fn(|cx| io.poll_ready(cx)).await;
if let Err(e) = res {
release(io, pool, created, e.is_io());
return Err(SendRequestError::from(e));
}
let resp = match io.send_request(req, eof) {
Ok((fut, send)) => {
release(io, pool, created, false);
if !eof {
send_body(body, send).await?;
}
fut.await.map_err(SendRequestError::from)?
}
Err(e) => {
release(io, pool, created, e.is_io());
return Err(e.into());
}
};
let (parts, body) = resp.into_parts();
let payload = if head_req { Payload::None } else { body.into() };
let mut head = ResponseHead::new(parts.status);
head.version = parts.version;
head.headers = parts.headers.into();
Ok((head, payload))
}
async fn send_body<B: MessageBody>(
mut body: B,
mut send: SendStream<Bytes>,
) -> Result<(), SendRequestError> {
let mut buf = None;
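    // HTTP/2 flow control: reserve capacity for the buffered chunk, wait for the send
    // window to open via `poll_capacity`, then send at most `cap` bytes and keep the
    // remainder buffered for the next iteration.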
loop {
if buf.is_none() {
match poll_fn(|cx| body.poll_next(cx)).await {
Some(Ok(b)) => {
send.reserve_capacity(b.len());
buf = Some(b);
}
Some(Err(e)) => return Err(e.into()),
None => {
if let Err(e) = send.send_data(Bytes::new(), true) {
return Err(e.into());
}
send.reserve_capacity(0);
return Ok(());
}
}
}
match poll_fn(|cx| send.poll_capacity(cx)).await {
None => return Ok(()),
Some(Ok(cap)) => {
let b = buf.as_mut().unwrap();
let len = b.len();
let bytes = b.split_to(std::cmp::min(cap, len));
if let Err(e) = send.send_data(bytes, false) {
return Err(e.into());
} else {
if !b.is_empty() {
send.reserve_capacity(b.len());
} else {
buf = None;
}
continue;
}
}
Some(Err(e)) => return Err(e.into()),
}
}
}
// release SendRequest object
fn release<T: AsyncRead + AsyncWrite + Unpin + 'static>(
io: SendRequest<Bytes>,
pool: Option<Acquired<T>>,
created: time::Instant,
close: bool,
) {
if let Some(mut pool) = pool {
if close {
pool.close(IoConnection::new(ConnectionType::H2(io), created, None));
} else {
pool.release(IoConnection::new(ConnectionType::H2(io), created, None));
}
}
}


@ -0,0 +1,20 @@
//! Http client api
use http::Uri;
mod connection;
mod connector;
mod error;
mod h1proto;
mod h2proto;
mod pool;
pub use self::connection::Connection;
pub use self::connector::Connector;
pub use self::error::{ConnectError, FreezeRequestError, InvalidUrl, SendRequestError};
pub use self::pool::Protocol;
#[derive(Clone)]
pub struct Connect {
pub uri: Uri,
pub addr: Option<std::net::SocketAddr>,
}
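// Illustrative only: a `Connect` message pairs the request URI with an optional
// pre-resolved socket address, e.g.
//     let msg = Connect { uri: "http://example.com".parse().unwrap(), addr: None };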


@ -0,0 +1,630 @@
use std::cell::RefCell;
use std::collections::VecDeque;
use std::future::Future;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::{delay_for, Delay};
use actix_service::Service;
use actix_utils::{oneshot, task::LocalWaker};
use bytes::Bytes;
use futures::future::{poll_fn, FutureExt, LocalBoxFuture};
use h2::client::{handshake, Connection, SendRequest};
use hashbrown::HashMap;
use http::uri::Authority;
use indexmap::IndexSet;
use slab::Slab;
use super::connection::{ConnectionType, IoConnection};
use super::error::ConnectError;
use super::Connect;
#[derive(Clone, Copy, PartialEq)]
/// Protocol version
pub enum Protocol {
Http1,
Http2,
}
#[derive(Hash, Eq, PartialEq, Clone, Debug)]
pub(crate) struct Key {
authority: Authority,
}
impl From<Authority> for Key {
fn from(authority: Authority) -> Key {
Key { authority }
}
}
/// Connections pool
pub(crate) struct ConnectionPool<T, Io: 'static>(Rc<RefCell<T>>, Rc<RefCell<Inner<Io>>>);
impl<T, Io> ConnectionPool<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
pub(crate) fn new(
connector: T,
conn_lifetime: Duration,
conn_keep_alive: Duration,
disconnect_timeout: Option<Duration>,
limit: usize,
) -> Self {
ConnectionPool(
Rc::new(RefCell::new(connector)),
Rc::new(RefCell::new(Inner {
conn_lifetime,
conn_keep_alive,
disconnect_timeout,
limit,
acquired: 0,
waiters: Slab::new(),
waiters_queue: IndexSet::new(),
available: HashMap::new(),
waker: LocalWaker::new(),
})),
)
}
}
impl<T, Io> Clone for ConnectionPool<T, Io>
where
Io: 'static,
{
fn clone(&self) -> Self {
ConnectionPool(self.0.clone(), self.1.clone())
}
}
impl<T, Io> Service for ConnectionPool<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = IoConnection<Io>;
type Error = ConnectError;
type Future = LocalBoxFuture<'static, Result<IoConnection<Io>, ConnectError>>;
fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
self.0.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
// start support future
actix_rt::spawn(ConnectorPoolSupport {
connector: self.0.clone(),
inner: self.1.clone(),
});
let mut connector = self.0.clone();
let inner = self.1.clone();
let fut = async move {
let key = if let Some(authority) = req.uri.authority_part() {
authority.clone().into()
} else {
return Err(ConnectError::Unresolverd);
};
// acquire connection
match poll_fn(|cx| Poll::Ready(inner.borrow_mut().acquire(&key, cx))).await {
Acquire::Acquired(io, created) => {
// use existing connection
return Ok(IoConnection::new(
io,
created,
Some(Acquired(key, Some(inner))),
));
}
Acquire::Available => {
// open tcp connection
let (io, proto) = connector.call(req).await?;
let guard = OpenGuard::new(key, inner);
if proto == Protocol::Http1 {
Ok(IoConnection::new(
ConnectionType::H1(io),
Instant::now(),
Some(guard.consume()),
))
} else {
let (snd, connection) = handshake(io).await?;
actix_rt::spawn(connection.map(|_| ()));
Ok(IoConnection::new(
ConnectionType::H2(snd),
Instant::now(),
Some(guard.consume()),
))
}
}
_ => {
// connection is not available, wait
let (rx, token) = inner.borrow_mut().wait_for(req);
let guard = WaiterGuard::new(key, token, inner);
let res = match rx.await {
Err(_) => Err(ConnectError::Disconnected),
Ok(res) => res,
};
guard.consume();
res
}
}
};
fut.boxed_local()
}
}
struct WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
key: Key,
token: usize,
inner: Option<Rc<RefCell<Inner<Io>>>>,
}
impl<Io> WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn new(key: Key, token: usize, inner: Rc<RefCell<Inner<Io>>>) -> Self {
Self {
key,
token,
inner: Some(inner),
}
}
fn consume(mut self) {
let _ = self.inner.take();
}
}
impl<Io> Drop for WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(&mut self) {
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release_waiter(&self.key, self.token);
inner.check_availibility();
}
}
}
struct OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
key: Key,
inner: Option<Rc<RefCell<Inner<Io>>>>,
}
impl<Io> OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn new(key: Key, inner: Rc<RefCell<Inner<Io>>>) -> Self {
Self {
key,
inner: Some(inner),
}
}
fn consume(mut self) -> Acquired<Io> {
Acquired(self.key.clone(), self.inner.take())
}
}
impl<Io> Drop for OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(&mut self) {
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release();
inner.check_availibility();
}
}
}
enum Acquire<T> {
Acquired(ConnectionType<T>, Instant),
Available,
NotAvailable,
}
struct AvailableConnection<Io> {
io: ConnectionType<Io>,
used: Instant,
created: Instant,
}
pub(crate) struct Inner<Io> {
conn_lifetime: Duration,
conn_keep_alive: Duration,
disconnect_timeout: Option<Duration>,
limit: usize,
acquired: usize,
available: HashMap<Key, VecDeque<AvailableConnection<Io>>>,
waiters: Slab<
Option<(
Connect,
oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
)>,
>,
waiters_queue: IndexSet<(Key, usize)>,
waker: LocalWaker,
}
impl<Io> Inner<Io> {
fn reserve(&mut self) {
self.acquired += 1;
}
fn release(&mut self) {
self.acquired -= 1;
}
fn release_waiter(&mut self, key: &Key, token: usize) {
self.waiters.remove(token);
let _ = self.waiters_queue.shift_remove(&(key.clone(), token));
}
}
impl<Io> Inner<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
/// connection is not available, wait
fn wait_for(
&mut self,
connect: Connect,
) -> (
oneshot::Receiver<Result<IoConnection<Io>, ConnectError>>,
usize,
) {
let (tx, rx) = oneshot::channel();
let key: Key = connect.uri.authority_part().unwrap().clone().into();
let entry = self.waiters.vacant_entry();
let token = entry.key();
entry.insert(Some((connect, tx)));
assert!(self.waiters_queue.insert((key, token)));
(rx, token)
}
fn acquire(&mut self, key: &Key, cx: &mut Context) -> Acquire<Io> {
// check limits
if self.limit > 0 && self.acquired >= self.limit {
return Acquire::NotAvailable;
}
self.reserve();
// check if open connection is available
// cleanup stale connections at the same time
if let Some(ref mut connections) = self.available.get_mut(key) {
let now = Instant::now();
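            // Walk idle connections newest-first: drop any past the keep-alive or lifetime
            // limits, probe H1 sockets with a small read (`Pending` means the peer is still
            // there; data, EOF, or an error means it is unusable), and return the first
            // connection that is still alive.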
while let Some(conn) = connections.pop_back() {
                // check if it is still usable
if (now - conn.used) > self.conn_keep_alive
|| (now - conn.created) > self.conn_lifetime
{
if let Some(timeout) = self.disconnect_timeout {
if let ConnectionType::H1(io) = conn.io {
actix_rt::spawn(CloseConnection::new(io, timeout))
}
}
} else {
let mut io = conn.io;
let mut buf = [0; 2];
if let ConnectionType::H1(ref mut s) = io {
match Pin::new(s).poll_read(cx, &mut buf) {
Poll::Pending => (),
Poll::Ready(Ok(n)) if n > 0 => {
if let Some(timeout) = self.disconnect_timeout {
if let ConnectionType::H1(io) = io {
actix_rt::spawn(CloseConnection::new(
io, timeout,
))
}
}
continue;
}
_ => continue,
}
}
return Acquire::Acquired(io, conn.created);
}
}
}
Acquire::Available
}
fn release_conn(&mut self, key: &Key, io: ConnectionType<Io>, created: Instant) {
self.acquired -= 1;
self.available
.entry(key.clone())
.or_insert_with(VecDeque::new)
.push_back(AvailableConnection {
io,
created,
used: Instant::now(),
});
self.check_availibility();
}
fn release_close(&mut self, io: ConnectionType<Io>) {
self.acquired -= 1;
if let Some(timeout) = self.disconnect_timeout {
if let ConnectionType::H1(io) = io {
actix_rt::spawn(CloseConnection::new(io, timeout))
}
}
self.check_availibility();
}
fn check_availibility(&self) {
if !self.waiters_queue.is_empty() && self.acquired < self.limit {
self.waker.wake();
}
}
}
struct CloseConnection<T> {
io: T,
timeout: Delay,
}
impl<T> CloseConnection<T>
where
T: AsyncWrite + Unpin,
{
fn new(io: T, timeout: Duration) -> Self {
CloseConnection {
io,
timeout: delay_for(timeout),
}
}
}
impl<T> Future for CloseConnection<T>
where
T: AsyncWrite + Unpin,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<()> {
let this = self.get_mut();
match Pin::new(&mut this.timeout).poll(cx) {
Poll::Ready(_) => Poll::Ready(()),
Poll::Pending => match Pin::new(&mut this.io).poll_shutdown(cx) {
Poll::Ready(_) => Poll::Ready(()),
Poll::Pending => Poll::Pending,
},
}
}
}
struct ConnectorPoolSupport<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
connector: T,
inner: Rc<RefCell<Inner<Io>>>,
}
impl<T, Io> Future for ConnectorPoolSupport<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>,
T::Future: 'static,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let this = unsafe { self.get_unchecked_mut() };
let mut inner = this.inner.as_ref().borrow_mut();
inner.waker.register(cx.waker());
// check waiters
loop {
let (key, token) = {
if let Some((key, token)) = inner.waiters_queue.get_index(0) {
(key.clone(), *token)
} else {
break;
}
};
if inner.waiters.get(token).unwrap().is_none() {
continue;
}
match inner.acquire(&key, cx) {
Acquire::NotAvailable => break,
Acquire::Acquired(io, created) => {
let tx = inner.waiters.get_mut(token).unwrap().take().unwrap().1;
if let Err(conn) = tx.send(Ok(IoConnection::new(
io,
created,
Some(Acquired(key.clone(), Some(this.inner.clone()))),
))) {
let (io, created) = conn.unwrap().into_inner();
inner.release_conn(&key, io, created);
}
}
Acquire::Available => {
let (connect, tx) =
inner.waiters.get_mut(token).unwrap().take().unwrap();
OpenWaitingConnection::spawn(
key.clone(),
tx,
this.inner.clone(),
this.connector.call(connect),
);
}
}
let _ = inner.waiters_queue.swap_remove_index(0);
}
Poll::Pending
}
}
struct OpenWaitingConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fut: F,
key: Key,
h2: Option<
LocalBoxFuture<
'static,
Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>,
>,
>,
rx: Option<oneshot::Sender<Result<IoConnection<Io>, ConnectError>>>,
inner: Option<Rc<RefCell<Inner<Io>>>>,
}
impl<F, Io> OpenWaitingConnection<F, Io>
where
F: Future<Output = Result<(Io, Protocol), ConnectError>> + 'static,
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn spawn(
key: Key,
rx: oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
inner: Rc<RefCell<Inner<Io>>>,
fut: F,
) {
actix_rt::spawn(OpenWaitingConnection {
key,
fut,
h2: None,
rx: Some(rx),
inner: Some(inner),
})
}
}
impl<F, Io> Drop for OpenWaitingConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
let mut inner = inner.as_ref().borrow_mut();
inner.release();
inner.check_availibility();
}
}
}
impl<F, Io> Future for OpenWaitingConnection<F, Io>
where
F: Future<Output = Result<(Io, Protocol), ConnectError>>,
Io: AsyncRead + AsyncWrite + Unpin,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let this = unsafe { self.get_unchecked_mut() };
if let Some(ref mut h2) = this.h2 {
return match Pin::new(h2).poll(cx) {
Poll::Ready(Ok((snd, connection))) => {
actix_rt::spawn(connection.map(|_| ()));
let rx = this.rx.take().unwrap();
let _ = rx.send(Ok(IoConnection::new(
ConnectionType::H2(snd),
Instant::now(),
Some(Acquired(this.key.clone(), this.inner.take())),
)));
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
Poll::Ready(Err(err)) => {
let _ = this.inner.take();
if let Some(rx) = this.rx.take() {
let _ = rx.send(Err(ConnectError::H2(err)));
}
Poll::Ready(())
}
};
}
match unsafe { Pin::new_unchecked(&mut this.fut) }.poll(cx) {
Poll::Ready(Err(err)) => {
let _ = this.inner.take();
if let Some(rx) = this.rx.take() {
let _ = rx.send(Err(err));
}
Poll::Ready(())
}
Poll::Ready(Ok((io, proto))) => {
if proto == Protocol::Http1 {
let rx = this.rx.take().unwrap();
let _ = rx.send(Ok(IoConnection::new(
ConnectionType::H1(io),
Instant::now(),
Some(Acquired(this.key.clone(), this.inner.take())),
)));
Poll::Ready(())
} else {
this.h2 = Some(handshake(io).boxed_local());
unsafe { Pin::new_unchecked(this) }.poll(cx)
}
}
Poll::Pending => Poll::Pending,
}
}
}
pub(crate) struct Acquired<T>(Key, Option<Rc<RefCell<Inner<T>>>>);
impl<T> Acquired<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
pub(crate) fn close(&mut self, conn: IoConnection<T>) {
if let Some(inner) = self.1.take() {
let (io, _) = conn.into_inner();
inner.as_ref().borrow_mut().release_close(io);
}
}
pub(crate) fn release(&mut self, conn: IoConnection<T>) {
if let Some(inner) = self.1.take() {
let (io, created) = conn.into_inner();
inner
.as_ref()
.borrow_mut()
.release_conn(&self.0, io, created);
}
}
}
impl<T> Drop for Acquired<T> {
fn drop(&mut self) {
if let Some(inner) = self.1.take() {
inner.as_ref().borrow_mut().release();
}
}
}


@ -0,0 +1,42 @@
use std::cell::UnsafeCell;
use std::rc::Rc;
use std::task::{Context, Poll};
use actix_service::Service;
#[doc(hidden)]
/// Service that allows turning a non-`Clone` service into a service with a `Clone` impl
pub(crate) struct CloneableService<T>(Rc<UnsafeCell<T>>);
impl<T> CloneableService<T> {
pub(crate) fn new(service: T) -> Self
where
T: Service,
{
Self(Rc::new(UnsafeCell::new(service)))
}
}
impl<T> Clone for CloneableService<T> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl<T> Service for CloneableService<T>
where
T: Service,
{
type Request = T::Request;
type Response = T::Response;
type Error = T::Error;
type Future = T::Future;
fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
unsafe { &mut *self.0.as_ref().get() }.poll_ready(cx)
}
fn call(&mut self, req: T::Request) -> Self::Future {
unsafe { &mut *self.0.as_ref().get() }.call(req)
}
}
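
// A minimal usage sketch (not part of the original file): wrap a service once and hand
// clones of the wrapper to several call sites. `EchoService` is a hypothetical stand-in
// used only for illustration; it is not an actix-http type.
#[cfg(test)]
mod usage_sketch {
    use super::*;
    use futures::future::{ok, Ready};

    struct EchoService;

    impl Service for EchoService {
        type Request = u32;
        type Response = u32;
        type Error = ();
        type Future = Ready<Result<u32, ()>>;

        fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
            Poll::Ready(Ok(()))
        }

        fn call(&mut self, req: u32) -> Self::Future {
            ok(req)
        }
    }

    #[actix_rt::test]
    async fn cloneable_service_can_be_shared() {
        let svc = CloneableService::new(EchoService);
        let mut a = svc.clone();
        let mut b = svc;
        assert_eq!(a.call(1).await, Ok(1));
        assert_eq!(b.call(2).await, Ok(2));
    }
}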

actix-http/src/config.rs

@ -0,0 +1,281 @@
use std::cell::UnsafeCell;
use std::fmt;
use std::fmt::Write;
use std::rc::Rc;
use std::time::{Duration, Instant};
use actix_rt::time::{delay, delay_for, Delay};
use bytes::BytesMut;
use futures::{future, FutureExt};
use time;
// "Sun, 06 Nov 1994 08:49:37 GMT".len()
const DATE_VALUE_LENGTH: usize = 29;
#[derive(Debug, PartialEq, Clone, Copy)]
/// Server keep-alive setting
pub enum KeepAlive {
/// Keep alive in seconds
Timeout(usize),
    /// Rely on the OS to shut down the TCP connection
Os,
/// Disabled
Disabled,
}
impl From<usize> for KeepAlive {
fn from(keepalive: usize) -> Self {
KeepAlive::Timeout(keepalive)
}
}
impl From<Option<usize>> for KeepAlive {
fn from(keepalive: Option<usize>) -> Self {
if let Some(keepalive) = keepalive {
KeepAlive::Timeout(keepalive)
} else {
KeepAlive::Disabled
}
}
}
/// Http service configuration
pub struct ServiceConfig(Rc<Inner>);
struct Inner {
keep_alive: Option<Duration>,
client_timeout: u64,
client_disconnect: u64,
ka_enabled: bool,
timer: DateService,
}
impl Clone for ServiceConfig {
fn clone(&self) -> Self {
ServiceConfig(self.0.clone())
}
}
impl Default for ServiceConfig {
fn default() -> Self {
Self::new(KeepAlive::Timeout(5), 0, 0)
}
}
impl ServiceConfig {
/// Create instance of `ServiceConfig`
pub fn new(
keep_alive: KeepAlive,
client_timeout: u64,
client_disconnect: u64,
) -> ServiceConfig {
let (keep_alive, ka_enabled) = match keep_alive {
KeepAlive::Timeout(val) => (val as u64, true),
KeepAlive::Os => (0, true),
KeepAlive::Disabled => (0, false),
};
let keep_alive = if ka_enabled && keep_alive > 0 {
Some(Duration::from_secs(keep_alive))
} else {
None
};
ServiceConfig(Rc::new(Inner {
keep_alive,
ka_enabled,
client_timeout,
client_disconnect,
timer: DateService::new(),
}))
}
#[inline]
/// Keep alive duration if configured.
pub fn keep_alive(&self) -> Option<Duration> {
self.0.keep_alive
}
#[inline]
    /// Return the state of the connection keep-alive functionality
pub fn keep_alive_enabled(&self) -> bool {
self.0.ka_enabled
}
#[inline]
/// Client timeout for first request.
pub fn client_timer(&self) -> Option<Delay> {
let delay_time = self.0.client_timeout;
if delay_time != 0 {
Some(delay(
self.0.timer.now() + Duration::from_millis(delay_time),
))
} else {
None
}
}
/// Client timeout for first request.
pub fn client_timer_expire(&self) -> Option<Instant> {
let delay = self.0.client_timeout;
if delay != 0 {
Some(self.0.timer.now() + Duration::from_millis(delay))
} else {
None
}
}
/// Client disconnect timer
pub fn client_disconnect_timer(&self) -> Option<Instant> {
let delay = self.0.client_disconnect;
if delay != 0 {
Some(self.0.timer.now() + Duration::from_millis(delay))
} else {
None
}
}
#[inline]
    /// Return the keep-alive timer delay, if configured.
pub fn keep_alive_timer(&self) -> Option<Delay> {
if let Some(ka) = self.0.keep_alive {
Some(delay(self.0.timer.now() + ka))
} else {
None
}
}
/// Keep-alive expire time
pub fn keep_alive_expire(&self) -> Option<Instant> {
if let Some(ka) = self.0.keep_alive {
Some(self.0.timer.now() + ka)
} else {
None
}
}
#[inline]
pub(crate) fn now(&self) -> Instant {
self.0.timer.now()
}
#[doc(hidden)]
pub fn set_date(&self, dst: &mut BytesMut) {
let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
self.0
.timer
.set_date(|date| buf[6..35].copy_from_slice(&date.bytes));
buf[35..].copy_from_slice(b"\r\n\r\n");
dst.extend_from_slice(&buf);
}
pub(crate) fn set_date_header(&self, dst: &mut BytesMut) {
self.0
.timer
.set_date(|date| dst.extend_from_slice(&date.bytes));
}
}
#[derive(Copy, Clone)]
struct Date {
bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
impl Date {
fn new() -> Date {
let mut date = Date {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
date.update();
date
}
fn update(&mut self) {
self.pos = 0;
write!(self, "{}", time::at_utc(time::get_time()).rfc822()).unwrap();
}
}
impl fmt::Write for Date {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
}
}
#[derive(Clone)]
struct DateService(Rc<DateServiceInner>);
struct DateServiceInner {
current: UnsafeCell<Option<(Date, Instant)>>,
}
impl DateServiceInner {
fn new() -> Self {
DateServiceInner {
current: UnsafeCell::new(None),
}
}
fn reset(&self) {
unsafe { (&mut *self.current.get()).take() };
}
fn update(&self) {
let now = Instant::now();
let date = Date::new();
*(unsafe { &mut *self.current.get() }) = Some((date, now));
}
}
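// `DateService` caches the formatted date and the current `Instant` in an `UnsafeCell`
// and spawns a 500ms delay to invalidate the cache, so hot-path header writes avoid
// re-formatting the date on every request. The type is single-threaded by construction
// (`Rc`, actix-rt), which is what makes the unchecked interior mutability acceptable.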
impl DateService {
fn new() -> Self {
DateService(Rc::new(DateServiceInner::new()))
}
fn check_date(&self) {
if unsafe { (&*self.0.current.get()).is_none() } {
self.0.update();
// periodic date update
let s = self.clone();
actix_rt::spawn(delay_for(Duration::from_millis(500)).then(move |_| {
s.0.reset();
future::ready(())
}));
}
}
fn now(&self) -> Instant {
self.check_date();
unsafe { (&*self.0.current.get()).as_ref().unwrap().1 }
}
fn set_date<F: FnMut(&Date)>(&self, mut f: F) {
self.check_date();
f(&unsafe { (&*self.0.current.get()).as_ref().unwrap().0 })
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_date_len() {
assert_eq!(DATE_VALUE_LENGTH, "Sun, 06 Nov 1994 08:49:37 GMT".len());
}
#[actix_rt::test]
async fn test_date() {
let settings = ServiceConfig::new(KeepAlive::Os, 0, 0);
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf1);
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf2);
assert_eq!(buf1, buf2);
}
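
    // Illustrative check (not in the original file): the `From` impls above let plain
    // integers and `Option<usize>` stand in for `KeepAlive` values.
    #[test]
    fn test_keep_alive_from() {
        assert_eq!(KeepAlive::from(10usize), KeepAlive::Timeout(10));
        assert_eq!(KeepAlive::from(Some(10usize)), KeepAlive::Timeout(10));
        assert_eq!(KeepAlive::from(None::<usize>), KeepAlive::Disabled);
    }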
}


@ -0,0 +1,260 @@
use std::borrow::Cow;
use chrono::Duration;
use time::Tm;
use super::{Cookie, SameSite};
/// Structure that follows the builder pattern for building `Cookie` structs.
///
/// To construct a cookie:
///
/// 1. Call [`Cookie::build`](struct.Cookie.html#method.build) to start building.
/// 2. Use any of the builder methods to set fields in the cookie.
/// 3. Call [finish](#method.finish) to retrieve the built cookie.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// # fn main() {
/// let cookie: Cookie = Cookie::build("name", "value")
/// .domain("www.rust-lang.org")
/// .path("/")
/// .secure(true)
/// .http_only(true)
/// .max_age(84600)
/// .finish();
/// # }
/// ```
#[derive(Debug, Clone)]
pub struct CookieBuilder {
/// The cookie being built.
cookie: Cookie<'static>,
}
impl CookieBuilder {
/// Creates a new `CookieBuilder` instance from the given name and value.
///
/// This method is typically called indirectly via
/// [Cookie::build](struct.Cookie.html#method.build).
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// let c = Cookie::build("foo", "bar").finish();
/// assert_eq!(c.name_value(), ("foo", "bar"));
/// ```
pub fn new<N, V>(name: N, value: V) -> CookieBuilder
where
N: Into<Cow<'static, str>>,
V: Into<Cow<'static, str>>,
{
CookieBuilder {
cookie: Cookie::new(name, value),
}
}
/// Sets the `expires` field in the cookie being built.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// # fn main() {
/// let c = Cookie::build("foo", "bar")
/// .expires(time::now())
/// .finish();
///
/// assert!(c.expires().is_some());
/// # }
/// ```
#[inline]
pub fn expires(mut self, when: Tm) -> CookieBuilder {
self.cookie.set_expires(when);
self
}
/// Sets the `max_age` field in seconds in the cookie being built.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// # fn main() {
/// let c = Cookie::build("foo", "bar")
/// .max_age(1800)
/// .finish();
///
/// assert_eq!(c.max_age(), Some(time::Duration::seconds(30 * 60)));
/// # }
/// ```
#[inline]
pub fn max_age(self, seconds: i64) -> CookieBuilder {
self.max_age_time(Duration::seconds(seconds))
}
/// Sets the `max_age` field in the cookie being built.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// # fn main() {
/// let c = Cookie::build("foo", "bar")
/// .max_age_time(time::Duration::minutes(30))
/// .finish();
///
/// assert_eq!(c.max_age(), Some(time::Duration::seconds(30 * 60)));
/// # }
/// ```
#[inline]
pub fn max_age_time(mut self, value: Duration) -> CookieBuilder {
self.cookie.set_max_age(value);
self
}
/// Sets the `domain` field in the cookie being built.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// let c = Cookie::build("foo", "bar")
/// .domain("www.rust-lang.org")
/// .finish();
///
/// assert_eq!(c.domain(), Some("www.rust-lang.org"));
/// ```
pub fn domain<D: Into<Cow<'static, str>>>(mut self, value: D) -> CookieBuilder {
self.cookie.set_domain(value);
self
}
/// Sets the `path` field in the cookie being built.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// let c = Cookie::build("foo", "bar")
/// .path("/")
/// .finish();
///
/// assert_eq!(c.path(), Some("/"));
/// ```
pub fn path<P: Into<Cow<'static, str>>>(mut self, path: P) -> CookieBuilder {
self.cookie.set_path(path);
self
}
/// Sets the `secure` field in the cookie being built.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// let c = Cookie::build("foo", "bar")
/// .secure(true)
/// .finish();
///
/// assert_eq!(c.secure(), Some(true));
/// ```
#[inline]
pub fn secure(mut self, value: bool) -> CookieBuilder {
self.cookie.set_secure(value);
self
}
/// Sets the `http_only` field in the cookie being built.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// let c = Cookie::build("foo", "bar")
/// .http_only(true)
/// .finish();
///
/// assert_eq!(c.http_only(), Some(true));
/// ```
#[inline]
pub fn http_only(mut self, value: bool) -> CookieBuilder {
self.cookie.set_http_only(value);
self
}
/// Sets the `same_site` field in the cookie being built.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{Cookie, SameSite};
///
/// let c = Cookie::build("foo", "bar")
/// .same_site(SameSite::Strict)
/// .finish();
///
/// assert_eq!(c.same_site(), Some(SameSite::Strict));
/// ```
#[inline]
pub fn same_site(mut self, value: SameSite) -> CookieBuilder {
self.cookie.set_same_site(value);
self
}
/// Makes the cookie being built 'permanent' by extending its expiration and
/// max age 20 years into the future.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
/// use chrono::Duration;
///
/// # fn main() {
/// let c = Cookie::build("foo", "bar")
/// .permanent()
/// .finish();
///
/// assert_eq!(c.max_age(), Some(Duration::days(365 * 20)));
/// # assert!(c.expires().is_some());
/// # }
/// ```
#[inline]
pub fn permanent(mut self) -> CookieBuilder {
self.cookie.make_permanent();
self
}
/// Finishes building and returns the built `Cookie`.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// let c = Cookie::build("foo", "bar")
/// .domain("crates.io")
/// .path("/")
/// .finish();
///
/// assert_eq!(c.name_value(), ("foo", "bar"));
/// assert_eq!(c.domain(), Some("crates.io"));
/// assert_eq!(c.path(), Some("/"));
/// ```
#[inline]
pub fn finish(self) -> Cookie<'static> {
self.cookie
}
}


@ -0,0 +1,71 @@
use std::borrow::Borrow;
use std::hash::{Hash, Hasher};
use std::ops::{Deref, DerefMut};
use super::Cookie;
/// A `DeltaCookie` is a helper structure used in a cookie jar. It wraps a
/// `Cookie` so that it can be hashed and compared purely by name. It further
/// records whether the wrapped cookie is a "removal" cookie, that is, a cookie
/// that when sent to the client removes the named cookie on the client's
/// machine.
#[derive(Clone, Debug)]
pub struct DeltaCookie {
pub cookie: Cookie<'static>,
pub removed: bool,
}
impl DeltaCookie {
/// Create a new `DeltaCookie` that is being added to a jar.
#[inline]
pub fn added(cookie: Cookie<'static>) -> DeltaCookie {
DeltaCookie {
cookie,
removed: false,
}
}
/// Create a new `DeltaCookie` that is being removed from a jar. The
/// `cookie` should be a "removal" cookie.
#[inline]
pub fn removed(cookie: Cookie<'static>) -> DeltaCookie {
DeltaCookie {
cookie,
removed: true,
}
}
}
impl Deref for DeltaCookie {
type Target = Cookie<'static>;
fn deref(&self) -> &Cookie<'static> {
&self.cookie
}
}
impl DerefMut for DeltaCookie {
fn deref_mut(&mut self) -> &mut Cookie<'static> {
&mut self.cookie
}
}
impl PartialEq for DeltaCookie {
fn eq(&self, other: &DeltaCookie) -> bool {
self.name() == other.name()
}
}
impl Eq for DeltaCookie {}
impl Hash for DeltaCookie {
fn hash<H: Hasher>(&self, state: &mut H) {
self.name().hash(state);
}
}
impl Borrow<str> for DeltaCookie {
fn borrow(&self) -> &str {
self.name()
}
}


@ -0,0 +1,98 @@
//! This module contains types that represent cookie properties that are not yet
//! standardized. That is, _draft_ features.
use std::fmt;
/// The `SameSite` cookie attribute.
///
/// A cookie with a `SameSite` attribute has restrictions imposed on when it is
/// sent to the origin server in a cross-site request. If the `SameSite`
/// attribute is "Strict", then the cookie is never sent in cross-site requests.
/// If the `SameSite` attribute is "Lax", the cookie is only sent in cross-site
/// requests with "safe" HTTP methods, i.e, `GET`, `HEAD`, `OPTIONS`, `TRACE`.
/// If the `SameSite` attribute is not present (made explicit via the
/// `SameSite::None` variant), then the cookie will be sent as normal.
///
/// **Note:** This cookie attribute is an HTTP draft! Its meaning and definition
/// are subject to change.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum SameSite {
/// The "Strict" `SameSite` attribute.
Strict,
/// The "Lax" `SameSite` attribute.
Lax,
/// No `SameSite` attribute.
None,
}
impl SameSite {
/// Returns `true` if `self` is `SameSite::Strict` and `false` otherwise.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::SameSite;
///
/// let strict = SameSite::Strict;
/// assert!(strict.is_strict());
/// assert!(!strict.is_lax());
/// assert!(!strict.is_none());
/// ```
#[inline]
pub fn is_strict(self) -> bool {
match self {
SameSite::Strict => true,
SameSite::Lax | SameSite::None => false,
}
}
/// Returns `true` if `self` is `SameSite::Lax` and `false` otherwise.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::SameSite;
///
/// let lax = SameSite::Lax;
/// assert!(lax.is_lax());
/// assert!(!lax.is_strict());
/// assert!(!lax.is_none());
/// ```
#[inline]
pub fn is_lax(self) -> bool {
match self {
SameSite::Lax => true,
SameSite::Strict | SameSite::None => false,
}
}
/// Returns `true` if `self` is `SameSite::None` and `false` otherwise.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::SameSite;
///
/// let none = SameSite::None;
/// assert!(none.is_none());
/// assert!(!none.is_lax());
/// assert!(!none.is_strict());
/// ```
#[inline]
pub fn is_none(self) -> bool {
match self {
SameSite::None => true,
SameSite::Lax | SameSite::Strict => false,
}
}
}
impl fmt::Display for SameSite {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
SameSite::Strict => write!(f, "Strict"),
SameSite::Lax => write!(f, "Lax"),
SameSite::None => Ok(()),
}
}
}


@ -0,0 +1,655 @@
use std::collections::HashSet;
use std::mem::replace;
use chrono::Duration;
use super::delta::DeltaCookie;
use super::Cookie;
#[cfg(feature = "secure-cookies")]
use super::secure::{Key, PrivateJar, SignedJar};
/// A collection of cookies that tracks its modifications.
///
/// A `CookieJar` provides storage for any number of cookies. Any changes made
/// to the jar are tracked; the changes can be retrieved via the
/// [delta](#method.delta) method, which returns an iterator over the changes.
///
/// # Usage
///
/// A jar's life begins via [new](#method.new) and calls to
/// [`add_original`](#method.add_original):
///
/// ```rust
/// use actix_http::cookie::{Cookie, CookieJar};
///
/// let mut jar = CookieJar::new();
/// jar.add_original(Cookie::new("name", "value"));
/// jar.add_original(Cookie::new("second", "another"));
/// ```
///
/// Cookies can be added via [add](#method.add) and removed via
/// [remove](#method.remove). Finally, cookies can be looked up via
/// [get](#method.get):
///
/// ```rust
/// # use actix_http::cookie::{Cookie, CookieJar};
/// let mut jar = CookieJar::new();
/// jar.add(Cookie::new("a", "one"));
/// jar.add(Cookie::new("b", "two"));
///
/// assert_eq!(jar.get("a").map(|c| c.value()), Some("one"));
/// assert_eq!(jar.get("b").map(|c| c.value()), Some("two"));
///
/// jar.remove(Cookie::named("b"));
/// assert!(jar.get("b").is_none());
/// ```
///
/// # Deltas
///
/// A jar keeps track of any modifications made to it over time. The
/// modifications are recorded as cookies. The modifications can be retrieved
/// via [delta](#method.delta). Any new `Cookie` added to a jar via `add`
/// results in the same `Cookie` appearing in the `delta`; cookies added via
/// `add_original` do not count towards the delta. Any _original_ cookie that is
/// removed from a jar results in a "removal" cookie appearing in the delta. A
/// "removal" cookie is a cookie that a server sends so that the cookie is
/// removed from the client's machine.
///
/// Deltas are typically used to create `Set-Cookie` headers corresponding to
/// the changes made to a cookie jar over a period of time.
///
/// ```rust
/// # use actix_http::cookie::{Cookie, CookieJar};
/// let mut jar = CookieJar::new();
///
/// // original cookies don't affect the delta
/// jar.add_original(Cookie::new("original", "value"));
/// assert_eq!(jar.delta().count(), 0);
///
/// // new cookies result in an equivalent `Cookie` in the delta
/// jar.add(Cookie::new("a", "one"));
/// jar.add(Cookie::new("b", "two"));
/// assert_eq!(jar.delta().count(), 2);
///
/// // removing an original cookie adds a "removal" cookie to the delta
/// jar.remove(Cookie::named("original"));
/// assert_eq!(jar.delta().count(), 3);
///
/// // removing a new cookie that was added removes that `Cookie` from the delta
/// jar.remove(Cookie::named("a"));
/// assert_eq!(jar.delta().count(), 2);
/// ```
#[derive(Default, Debug, Clone)]
pub struct CookieJar {
original_cookies: HashSet<DeltaCookie>,
delta_cookies: HashSet<DeltaCookie>,
}
impl CookieJar {
/// Creates an empty cookie jar.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::CookieJar;
///
/// let jar = CookieJar::new();
/// assert_eq!(jar.iter().count(), 0);
/// ```
pub fn new() -> CookieJar {
CookieJar::default()
}
/// Returns a reference to the `Cookie` inside this jar with the name
/// `name`. If no such cookie exists, returns `None`.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
///
/// let mut jar = CookieJar::new();
/// assert!(jar.get("name").is_none());
///
/// jar.add(Cookie::new("name", "value"));
/// assert_eq!(jar.get("name").map(|c| c.value()), Some("value"));
/// ```
pub fn get(&self, name: &str) -> Option<&Cookie<'static>> {
self.delta_cookies
.get(name)
.or_else(|| self.original_cookies.get(name))
.and_then(|c| if !c.removed { Some(&c.cookie) } else { None })
}
/// Adds an "original" `cookie` to this jar. If an original cookie with the
/// same name already exists, it is replaced with `cookie`. Cookies added
/// with `add` take precedence and are not replaced by this method.
///
/// Adding an original cookie does not affect the [delta](#method.delta)
/// computation. This method is intended to be used to seed the cookie jar
/// with cookies received from a client's HTTP message.
///
/// For accurate `delta` computations, this method should not be called
/// after calling `remove`.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
///
/// let mut jar = CookieJar::new();
/// jar.add_original(Cookie::new("name", "value"));
/// jar.add_original(Cookie::new("second", "two"));
///
/// assert_eq!(jar.get("name").map(|c| c.value()), Some("value"));
/// assert_eq!(jar.get("second").map(|c| c.value()), Some("two"));
/// assert_eq!(jar.iter().count(), 2);
/// assert_eq!(jar.delta().count(), 0);
/// ```
pub fn add_original(&mut self, cookie: Cookie<'static>) {
self.original_cookies.replace(DeltaCookie::added(cookie));
}
/// Adds `cookie` to this jar. If a cookie with the same name already
/// exists, it is replaced with `cookie`.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
///
/// let mut jar = CookieJar::new();
/// jar.add(Cookie::new("name", "value"));
/// jar.add(Cookie::new("second", "two"));
///
/// assert_eq!(jar.get("name").map(|c| c.value()), Some("value"));
/// assert_eq!(jar.get("second").map(|c| c.value()), Some("two"));
/// assert_eq!(jar.iter().count(), 2);
/// assert_eq!(jar.delta().count(), 2);
/// ```
pub fn add(&mut self, cookie: Cookie<'static>) {
self.delta_cookies.replace(DeltaCookie::added(cookie));
}
/// Removes `cookie` from this jar. If an _original_ cookie with the same
/// name as `cookie` is present in the jar, a _removal_ cookie will be
/// present in the `delta` computation. To properly generate the removal
/// cookie, `cookie` must contain the same `path` and `domain` as the cookie
/// that was initially set.
///
/// A "removal" cookie is a cookie that has the same name as the original
/// cookie but has an empty value, a max-age of 0, and an expiration date
/// far in the past.
///
/// # Example
///
/// Removing an _original_ cookie results in a _removal_ cookie:
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
/// use chrono::Duration;
///
/// # fn main() {
/// let mut jar = CookieJar::new();
///
/// // Assume this cookie originally had a path of "/" and domain of "a.b".
/// jar.add_original(Cookie::new("name", "value"));
///
/// // If the path and domain were set, they must be provided to `remove`.
/// jar.remove(Cookie::build("name", "").path("/").domain("a.b").finish());
///
/// // The delta will contain the removal cookie.
/// let delta: Vec<_> = jar.delta().collect();
/// assert_eq!(delta.len(), 1);
/// assert_eq!(delta[0].name(), "name");
/// assert_eq!(delta[0].max_age(), Some(Duration::seconds(0)));
/// # }
/// ```
///
/// Removing a new cookie does not result in a _removal_ cookie:
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
///
/// let mut jar = CookieJar::new();
/// jar.add(Cookie::new("name", "value"));
/// assert_eq!(jar.delta().count(), 1);
///
/// jar.remove(Cookie::named("name"));
/// assert_eq!(jar.delta().count(), 0);
/// ```
pub fn remove(&mut self, mut cookie: Cookie<'static>) {
if self.original_cookies.contains(cookie.name()) {
cookie.set_value("");
cookie.set_max_age(Duration::seconds(0));
cookie.set_expires(time::now() - Duration::days(365));
self.delta_cookies.replace(DeltaCookie::removed(cookie));
} else {
self.delta_cookies.remove(cookie.name());
}
}
/// Removes `cookie` from this jar completely. This method differs from
/// `remove` in that no delta cookie is created under any condition. Neither
/// the `delta` nor `iter` methods will return a cookie that is removed
/// using this method.
///
/// # Example
///
/// Removing an _original_ cookie; no _removal_ cookie is generated:
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
/// use chrono::Duration;
///
/// # fn main() {
/// let mut jar = CookieJar::new();
///
/// // Add an original cookie and a new cookie.
/// jar.add_original(Cookie::new("name", "value"));
/// jar.add(Cookie::new("key", "value"));
/// assert_eq!(jar.delta().count(), 1);
/// assert_eq!(jar.iter().count(), 2);
///
/// // Now force remove the original cookie.
/// jar.force_remove(Cookie::new("name", "value"));
/// assert_eq!(jar.delta().count(), 1);
/// assert_eq!(jar.iter().count(), 1);
///
/// // Now force remove the new cookie.
/// jar.force_remove(Cookie::new("key", "value"));
/// assert_eq!(jar.delta().count(), 0);
/// assert_eq!(jar.iter().count(), 0);
/// # }
/// ```
pub fn force_remove<'a>(&mut self, cookie: Cookie<'a>) {
self.original_cookies.remove(cookie.name());
self.delta_cookies.remove(cookie.name());
}
/// Removes all cookies from this cookie jar.
#[deprecated(
since = "0.7.0",
note = "calling this method may not remove \
all cookies since the path and domain are not specified; use \
`remove` instead"
)]
pub fn clear(&mut self) {
self.delta_cookies.clear();
for delta in replace(&mut self.original_cookies, HashSet::new()) {
self.remove(delta.cookie);
}
}
/// Returns an iterator over cookies that represent the changes to this jar
/// over time. These cookies can be rendered directly as `Set-Cookie` header
/// values to effect the changes made to this jar on the client.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
///
/// let mut jar = CookieJar::new();
/// jar.add_original(Cookie::new("name", "value"));
/// jar.add_original(Cookie::new("second", "two"));
///
/// // Add new cookies.
/// jar.add(Cookie::new("new", "third"));
/// jar.add(Cookie::new("another", "fourth"));
/// jar.add(Cookie::new("yac", "fifth"));
///
/// // Remove some cookies.
/// jar.remove(Cookie::named("name"));
/// jar.remove(Cookie::named("another"));
///
/// // Delta contains two new cookies ("new", "yac") and a removal ("name").
/// assert_eq!(jar.delta().count(), 3);
/// ```
pub fn delta(&self) -> Delta {
Delta {
iter: self.delta_cookies.iter(),
}
}
/// Returns an iterator over all of the cookies present in this jar.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
///
/// let mut jar = CookieJar::new();
///
/// jar.add_original(Cookie::new("name", "value"));
/// jar.add_original(Cookie::new("second", "two"));
///
/// jar.add(Cookie::new("new", "third"));
/// jar.add(Cookie::new("another", "fourth"));
/// jar.add(Cookie::new("yac", "fifth"));
///
/// jar.remove(Cookie::named("name"));
/// jar.remove(Cookie::named("another"));
///
/// // There are three cookies in the jar: "second", "new", and "yac".
/// # assert_eq!(jar.iter().count(), 3);
/// for cookie in jar.iter() {
/// match cookie.name() {
/// "second" => assert_eq!(cookie.value(), "two"),
/// "new" => assert_eq!(cookie.value(), "third"),
/// "yac" => assert_eq!(cookie.value(), "fifth"),
/// _ => unreachable!("there are only three cookies in the jar")
/// }
/// }
/// ```
pub fn iter(&self) -> Iter {
Iter {
delta_cookies: self
.delta_cookies
.iter()
.chain(self.original_cookies.difference(&self.delta_cookies)),
}
}
/// Returns a `PrivateJar` with `self` as its parent jar using the key `key`
/// to sign/encrypt and verify/decrypt cookies added/retrieved from the
/// child jar.
///
/// Any modifications to the child jar will be reflected on the parent jar,
/// and any retrievals from the child jar will be made from the parent jar.
///
/// This method is only available when the `secure-cookies` feature is enabled.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{Cookie, CookieJar, Key};
///
/// // Generate a secure key.
/// let key = Key::generate();
///
/// // Add a private (signed + encrypted) cookie.
/// let mut jar = CookieJar::new();
/// jar.private(&key).add(Cookie::new("private", "text"));
///
/// // The cookie's contents are encrypted.
/// assert_ne!(jar.get("private").unwrap().value(), "text");
///
/// // They can be decrypted and verified through the child jar.
/// assert_eq!(jar.private(&key).get("private").unwrap().value(), "text");
///
/// // A tampered with cookie does not validate but still exists.
/// let mut cookie = jar.get("private").unwrap().clone();
/// jar.add(Cookie::new("private", cookie.value().to_string() + "!"));
/// assert!(jar.private(&key).get("private").is_none());
/// assert!(jar.get("private").is_some());
/// ```
#[cfg(feature = "secure-cookies")]
pub fn private(&mut self, key: &Key) -> PrivateJar {
PrivateJar::new(self, key)
}
/// Returns a `SignedJar` with `self` as its parent jar using the key `key`
/// to sign/verify cookies added/retrieved from the child jar.
///
/// Any modifications to the child jar will be reflected on the parent jar,
/// and any retrievals from the child jar will be made from the parent jar.
///
/// This method is only available when the `secure-cookies` feature is enabled.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{Cookie, CookieJar, Key};
///
/// // Generate a secure key.
/// let key = Key::generate();
///
/// // Add a signed cookie.
/// let mut jar = CookieJar::new();
/// jar.signed(&key).add(Cookie::new("signed", "text"));
///
/// // The cookie's contents are signed but still in plaintext.
/// assert_ne!(jar.get("signed").unwrap().value(), "text");
/// assert!(jar.get("signed").unwrap().value().contains("text"));
///
/// // They can be verified through the child jar.
/// assert_eq!(jar.signed(&key).get("signed").unwrap().value(), "text");
///
/// // A tampered with cookie does not validate but still exists.
/// let mut cookie = jar.get("signed").unwrap().clone();
/// jar.add(Cookie::new("signed", cookie.value().to_string() + "!"));
/// assert!(jar.signed(&key).get("signed").is_none());
/// assert!(jar.get("signed").is_some());
/// ```
#[cfg(feature = "secure-cookies")]
pub fn signed(&mut self, key: &Key) -> SignedJar {
SignedJar::new(self, key)
}
}
use std::collections::hash_set::Iter as HashSetIter;
/// Iterator over the changes to a cookie jar.
pub struct Delta<'a> {
iter: HashSetIter<'a, DeltaCookie>,
}
impl<'a> Iterator for Delta<'a> {
type Item = &'a Cookie<'static>;
fn next(&mut self) -> Option<&'a Cookie<'static>> {
self.iter.next().map(|c| &c.cookie)
}
}
use std::collections::hash_map::RandomState;
use std::collections::hash_set::Difference;
use std::iter::Chain;
/// Iterator over all of the cookies in a jar.
pub struct Iter<'a> {
delta_cookies:
Chain<HashSetIter<'a, DeltaCookie>, Difference<'a, DeltaCookie, RandomState>>,
}
impl<'a> Iterator for Iter<'a> {
type Item = &'a Cookie<'static>;
fn next(&mut self) -> Option<&'a Cookie<'static>> {
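// Delta cookies shadow their originals in the chain; entries that mark a
// removal are skipped so they never show up while iterating the jar.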
for cookie in self.delta_cookies.by_ref() {
if !cookie.removed {
return Some(&*cookie);
}
}
None
}
}
#[cfg(test)]
mod test {
#[cfg(feature = "secure-cookies")]
use super::Key;
use super::{Cookie, CookieJar};
#[test]
#[allow(deprecated)]
fn simple() {
let mut c = CookieJar::new();
c.add(Cookie::new("test", ""));
c.add(Cookie::new("test2", ""));
c.remove(Cookie::named("test"));
assert!(c.get("test").is_none());
assert!(c.get("test2").is_some());
c.add(Cookie::new("test3", ""));
c.clear();
assert!(c.get("test").is_none());
assert!(c.get("test2").is_none());
assert!(c.get("test3").is_none());
}
#[test]
fn jar_is_send() {
fn is_send<T: Send>(_: T) -> bool {
true
}
assert!(is_send(CookieJar::new()))
}
#[test]
#[cfg(feature = "secure-cookies")]
fn iter() {
let key = Key::generate();
let mut c = CookieJar::new();
c.add_original(Cookie::new("original", "original"));
c.add(Cookie::new("test", "test"));
c.add(Cookie::new("test2", "test2"));
c.add(Cookie::new("test3", "test3"));
assert_eq!(c.iter().count(), 4);
c.signed(&key).add(Cookie::new("signed", "signed"));
c.private(&key).add(Cookie::new("encrypted", "encrypted"));
assert_eq!(c.iter().count(), 6);
c.remove(Cookie::named("test"));
assert_eq!(c.iter().count(), 5);
c.remove(Cookie::named("signed"));
c.remove(Cookie::named("test2"));
assert_eq!(c.iter().count(), 3);
c.add(Cookie::new("test2", "test2"));
assert_eq!(c.iter().count(), 4);
c.remove(Cookie::named("test2"));
assert_eq!(c.iter().count(), 3);
}
#[test]
#[cfg(feature = "secure-cookies")]
fn delta() {
use chrono::Duration;
use std::collections::HashMap;
let mut c = CookieJar::new();
c.add_original(Cookie::new("original", "original"));
c.add_original(Cookie::new("original1", "original1"));
c.add(Cookie::new("test", "test"));
c.add(Cookie::new("test2", "test2"));
c.add(Cookie::new("test3", "test3"));
c.add(Cookie::new("test4", "test4"));
c.remove(Cookie::named("test"));
c.remove(Cookie::named("original"));
assert_eq!(c.delta().count(), 4);
let names: HashMap<_, _> = c.delta().map(|c| (c.name(), c.max_age())).collect();
assert!(names.get("test2").unwrap().is_none());
assert!(names.get("test3").unwrap().is_none());
assert!(names.get("test4").unwrap().is_none());
assert_eq!(names.get("original").unwrap(), &Some(Duration::seconds(0)));
}
#[test]
fn replace_original() {
let mut jar = CookieJar::new();
jar.add_original(Cookie::new("original_a", "a"));
jar.add_original(Cookie::new("original_b", "b"));
assert_eq!(jar.get("original_a").unwrap().value(), "a");
jar.add(Cookie::new("original_a", "av2"));
assert_eq!(jar.get("original_a").unwrap().value(), "av2");
}
#[test]
fn empty_delta() {
let mut jar = CookieJar::new();
jar.add(Cookie::new("name", "val"));
assert_eq!(jar.delta().count(), 1);
jar.remove(Cookie::named("name"));
assert_eq!(jar.delta().count(), 0);
jar.add_original(Cookie::new("name", "val"));
assert_eq!(jar.delta().count(), 0);
jar.remove(Cookie::named("name"));
assert_eq!(jar.delta().count(), 1);
jar.add(Cookie::new("name", "val"));
assert_eq!(jar.delta().count(), 1);
jar.remove(Cookie::named("name"));
assert_eq!(jar.delta().count(), 1);
}
#[test]
fn add_remove_add() {
let mut jar = CookieJar::new();
jar.add_original(Cookie::new("name", "val"));
assert_eq!(jar.delta().count(), 0);
jar.remove(Cookie::named("name"));
assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
assert_eq!(jar.delta().count(), 1);
// The cookie's been deleted. Another original doesn't change that.
jar.add_original(Cookie::new("name", "val"));
assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
assert_eq!(jar.delta().count(), 1);
jar.remove(Cookie::named("name"));
assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
assert_eq!(jar.delta().count(), 1);
jar.add(Cookie::new("name", "val"));
assert_eq!(jar.delta().filter(|c| !c.value().is_empty()).count(), 1);
assert_eq!(jar.delta().count(), 1);
jar.remove(Cookie::named("name"));
assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
assert_eq!(jar.delta().count(), 1);
}
#[test]
fn replace_remove() {
let mut jar = CookieJar::new();
jar.add_original(Cookie::new("name", "val"));
assert_eq!(jar.delta().count(), 0);
jar.add(Cookie::new("name", "val"));
assert_eq!(jar.delta().count(), 1);
assert_eq!(jar.delta().filter(|c| !c.value().is_empty()).count(), 1);
jar.remove(Cookie::named("name"));
assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
}
#[test]
fn remove_with_path() {
let mut jar = CookieJar::new();
jar.add_original(Cookie::build("name", "val").finish());
assert_eq!(jar.iter().count(), 1);
assert_eq!(jar.delta().count(), 0);
assert_eq!(jar.iter().filter(|c| c.path().is_none()).count(), 1);
jar.remove(Cookie::build("name", "").path("/").finish());
assert_eq!(jar.iter().count(), 0);
assert_eq!(jar.delta().count(), 1);
assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
assert_eq!(jar.delta().filter(|c| c.path() == Some("/")).count(), 1);
}
}

actix-http/src/cookie/mod.rs (1106 lines): diff suppressed because it is too large

@ -0,0 +1,425 @@
use std::borrow::Cow;
use std::cmp;
use std::convert::From;
use std::error::Error;
use std::fmt;
use std::str::Utf8Error;
use chrono::Duration;
use percent_encoding::percent_decode;
use super::{Cookie, CookieStr, SameSite};
/// Enum corresponding to a parsing error.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum ParseError {
/// The cookie did not contain a name/value pair.
MissingPair,
/// The cookie's name was empty.
EmptyName,
/// Decoding the cookie's name or value resulted in invalid UTF-8.
Utf8Error(Utf8Error),
/// It is discouraged to exhaustively match on this enum as its variants may
/// grow without a breaking-change bump in version numbers.
#[doc(hidden)]
__Nonexhaustive,
}
impl ParseError {
/// Returns a description of this error as a string
pub fn as_str(&self) -> &'static str {
match *self {
ParseError::MissingPair => "the cookie is missing a name/value pair",
ParseError::EmptyName => "the cookie's name is empty",
ParseError::Utf8Error(_) => {
"decoding the cookie's name or value resulted in invalid UTF-8"
}
ParseError::__Nonexhaustive => unreachable!("__Nonexhaustive ParseError"),
}
}
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl From<Utf8Error> for ParseError {
fn from(error: Utf8Error) -> ParseError {
ParseError::Utf8Error(error)
}
}
impl Error for ParseError {
fn description(&self) -> &str {
self.as_str()
}
}
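// A small illustrative test (not part of the original source) showing how the
// variants above surface from `Cookie::parse` and how `as_str` renders them.
#[cfg(test)]
#[test]
fn parse_error_sketch() {
    assert_eq!(Cookie::parse("no-pair").unwrap_err(), ParseError::MissingPair);
    assert_eq!(Cookie::parse("=value").unwrap_err(), ParseError::EmptyName);
    assert_eq!(
        ParseError::MissingPair.as_str(),
        "the cookie is missing a name/value pair"
    );
}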
fn indexes_of(needle: &str, haystack: &str) -> Option<(usize, usize)> {
let haystack_start = haystack.as_ptr() as usize;
let needle_start = needle.as_ptr() as usize;
if needle_start < haystack_start {
return None;
}
if (needle_start + needle.len()) > (haystack_start + haystack.len()) {
return None;
}
let start = needle_start - haystack_start;
let end = start + needle.len();
Some((start, end))
}
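// Illustrative test (not in the original source): `indexes_of` recovers the
// byte range of a subslice within its parent string via pointer arithmetic;
// slices that do not point into the haystack fall outside the computed range
// and yield `None`.
#[cfg(test)]
#[test]
fn indexes_of_sketch() {
    let s = "name=value; Path=/";
    assert_eq!(indexes_of(s, s), Some((0, s.len())));
    assert_eq!(indexes_of(&s[5..10], s), Some((5, 10)));
}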
fn name_val_decoded(
name: &str,
val: &str,
) -> Result<(CookieStr, CookieStr), ParseError> {
let decoded_name = percent_decode(name.as_bytes()).decode_utf8()?;
let decoded_value = percent_decode(val.as_bytes()).decode_utf8()?;
let name = CookieStr::Concrete(Cow::Owned(decoded_name.into_owned()));
let val = CookieStr::Concrete(Cow::Owned(decoded_value.into_owned()));
Ok((name, val))
}
// This function does the real parsing but _does not_ set the `cookie_string` in
// the returned cookie object. This only exists so that the borrow to `s` is
// returned at the end of the call, allowing the `cookie_string` field to be
// set in the outer `parse_cookie` function.
fn parse_inner<'c>(s: &str, decode: bool) -> Result<Cookie<'c>, ParseError> {
let mut attributes = s.split(';');
let key_value = match attributes.next() {
Some(s) => s,
_ => panic!(),
};
// Determine the name = val.
let (name, value) = match key_value.find('=') {
Some(i) => (key_value[..i].trim(), key_value[(i + 1)..].trim()),
None => return Err(ParseError::MissingPair),
};
if name.is_empty() {
return Err(ParseError::EmptyName);
}
// Create a cookie with all of the defaults. We'll fill things in while we
// iterate through the parameters below.
let (name, value) = if decode {
name_val_decoded(name, value)?
} else {
let name_indexes = indexes_of(name, s).expect("name sub");
let value_indexes = indexes_of(value, s).expect("value sub");
let name = CookieStr::Indexed(name_indexes.0, name_indexes.1);
let value = CookieStr::Indexed(value_indexes.0, value_indexes.1);
(name, value)
};
let mut cookie = Cookie {
name,
value,
cookie_string: None,
expires: None,
max_age: None,
domain: None,
path: None,
secure: None,
http_only: None,
same_site: None,
};
for attr in attributes {
let (key, value) = match attr.find('=') {
Some(i) => (attr[..i].trim(), Some(attr[(i + 1)..].trim())),
None => (attr.trim(), None),
};
match (&*key.to_ascii_lowercase(), value) {
("secure", _) => cookie.secure = Some(true),
("httponly", _) => cookie.http_only = Some(true),
("max-age", Some(v)) => {
// See RFC 6265 Section 5.2.2: negative values indicate that the
// earliest possible expiration time should be used, so set the
// max age to 0 seconds.
cookie.max_age = match v.parse() {
Ok(val) if val <= 0 => Some(Duration::zero()),
Ok(val) => {
// Don't panic if the max age seconds is greater than what's supported by
// `Duration`.
let val = cmp::min(val, Duration::max_value().num_seconds());
Some(Duration::seconds(val))
}
Err(_) => continue,
};
}
("domain", Some(mut domain)) if !domain.is_empty() => {
if domain.starts_with('.') {
domain = &domain[1..];
}
let (i, j) = indexes_of(domain, s).expect("domain sub");
cookie.domain = Some(CookieStr::Indexed(i, j));
}
("path", Some(v)) => {
let (i, j) = indexes_of(v, s).expect("path sub");
cookie.path = Some(CookieStr::Indexed(i, j));
}
("samesite", Some(v)) => {
if v.eq_ignore_ascii_case("strict") {
cookie.same_site = Some(SameSite::Strict);
} else if v.eq_ignore_ascii_case("lax") {
cookie.same_site = Some(SameSite::Lax);
} else {
// We do nothing here, for now. When/if the `SameSite`
// attribute becomes standard, the spec says that we should
// ignore this cookie, i.e., fail to parse it, when an
// invalid value is passed in. The draft is at
// http://httpwg.org/http-extensions/draft-ietf-httpbis-cookie-same-site.html.
}
}
("expires", Some(v)) => {
// Try strptime with three date formats according to
// http://tools.ietf.org/html/rfc2616#section-3.3.1. Try
// additional ones as encountered in the real world.
let tm = time::strptime(v, "%a, %d %b %Y %H:%M:%S %Z")
.or_else(|_| time::strptime(v, "%A, %d-%b-%y %H:%M:%S %Z"))
.or_else(|_| time::strptime(v, "%a, %d-%b-%Y %H:%M:%S %Z"))
.or_else(|_| time::strptime(v, "%a %b %d %H:%M:%S %Y"));
if let Ok(time) = tm {
cookie.expires = Some(time)
}
}
_ => {
// We're going to be permissive here. If we have no idea what
// this is, then it's something nonstandard. We're not going to
// store it (because it's not compliant), but we're also not
// going to emit an error.
}
}
}
Ok(cookie)
}
pub fn parse_cookie<'c, S>(cow: S, decode: bool) -> Result<Cookie<'c>, ParseError>
where
S: Into<Cow<'c, str>>,
{
let s = cow.into();
let mut cookie = parse_inner(&s, decode)?;
cookie.cookie_string = Some(s);
Ok(cookie)
}
#[cfg(test)]
mod tests {
use super::{Cookie, SameSite};
use chrono::Duration;
use time::strptime;
macro_rules! assert_eq_parse {
($string:expr, $expected:expr) => {
let cookie = match Cookie::parse($string) {
Ok(cookie) => cookie,
Err(e) => panic!("Failed to parse {:?}: {:?}", $string, e),
};
assert_eq!(cookie, $expected);
};
}
macro_rules! assert_ne_parse {
($string:expr, $expected:expr) => {
let cookie = match Cookie::parse($string) {
Ok(cookie) => cookie,
Err(e) => panic!("Failed to parse {:?}: {:?}", $string, e),
};
assert_ne!(cookie, $expected);
};
}
#[test]
fn parse_same_site() {
let expected = Cookie::build("foo", "bar")
.same_site(SameSite::Lax)
.finish();
assert_eq_parse!("foo=bar; SameSite=Lax", expected);
assert_eq_parse!("foo=bar; SameSite=lax", expected);
assert_eq_parse!("foo=bar; SameSite=LAX", expected);
assert_eq_parse!("foo=bar; samesite=Lax", expected);
assert_eq_parse!("foo=bar; SAMESITE=Lax", expected);
let expected = Cookie::build("foo", "bar")
.same_site(SameSite::Strict)
.finish();
assert_eq_parse!("foo=bar; SameSite=Strict", expected);
assert_eq_parse!("foo=bar; SameSITE=Strict", expected);
assert_eq_parse!("foo=bar; SameSite=strict", expected);
assert_eq_parse!("foo=bar; SameSite=STrICT", expected);
assert_eq_parse!("foo=bar; SameSite=STRICT", expected);
}
#[test]
fn parse() {
assert!(Cookie::parse("bar").is_err());
assert!(Cookie::parse("=bar").is_err());
assert!(Cookie::parse(" =bar").is_err());
assert!(Cookie::parse("foo=").is_ok());
let expected = Cookie::build("foo", "bar=baz").finish();
assert_eq_parse!("foo=bar=baz", expected);
let mut expected = Cookie::build("foo", "bar").finish();
assert_eq_parse!("foo=bar", expected);
assert_eq_parse!("foo = bar", expected);
assert_eq_parse!(" foo=bar ", expected);
assert_eq_parse!(" foo=bar ;Domain=", expected);
assert_eq_parse!(" foo=bar ;Domain= ", expected);
assert_eq_parse!(" foo=bar ;Ignored", expected);
let mut unexpected = Cookie::build("foo", "bar").http_only(false).finish();
assert_ne_parse!(" foo=bar ;HttpOnly", unexpected);
assert_ne_parse!(" foo=bar; httponly", unexpected);
expected.set_http_only(true);
assert_eq_parse!(" foo=bar ;HttpOnly", expected);
assert_eq_parse!(" foo=bar ;httponly", expected);
assert_eq_parse!(" foo=bar ;HTTPONLY=whatever", expected);
assert_eq_parse!(" foo=bar ; sekure; HTTPONLY", expected);
expected.set_secure(true);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure", expected);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure=aaaa", expected);
unexpected.set_http_only(true);
unexpected.set_secure(true);
assert_ne_parse!(" foo=bar ;HttpOnly; skeure", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly; =secure", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly;", unexpected);
unexpected.set_secure(false);
assert_ne_parse!(" foo=bar ;HttpOnly; secure", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly; secure", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly; secure", unexpected);
expected.set_max_age(Duration::zero());
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=0", expected);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 0 ", expected);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=-1", expected);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = -1 ", expected);
expected.set_max_age(Duration::minutes(1));
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=60", expected);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 60 ", expected);
expected.set_max_age(Duration::seconds(4));
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=4", expected);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 4 ", expected);
unexpected.set_secure(true);
unexpected.set_max_age(Duration::minutes(1));
assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=122", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 38 ", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=51", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = -1 ", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 0", unexpected);
expected.set_path("/");
assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4; Path=/", expected);
assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4;Path=/", expected);
expected.set_path("/foo");
assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4; Path=/foo", expected);
assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4;Path=/foo", expected);
assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4;path=/foo", expected);
assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4;path = /foo", expected);
unexpected.set_max_age(Duration::seconds(4));
unexpected.set_path("/bar");
assert_ne_parse!("foo=bar;HttpOnly; Secure; Max-Age=4; Path=/foo", unexpected);
assert_ne_parse!("foo=bar;HttpOnly; Secure; Max-Age=4;Path=/baz", unexpected);
expected.set_domain("www.foo.com");
assert_eq_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=www.foo.com",
expected
);
expected.set_domain("foo.com");
assert_eq_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=foo.com",
expected
);
assert_eq_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=FOO.COM",
expected
);
unexpected.set_path("/foo");
unexpected.set_domain("bar.com");
assert_ne_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=foo.com",
unexpected
);
assert_ne_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=FOO.COM",
unexpected
);
let time_str = "Wed, 21 Oct 2015 07:28:00 GMT";
let expires = strptime(time_str, "%a, %d %b %Y %H:%M:%S %Z").unwrap();
expected.set_expires(expires);
assert_eq_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=foo.com; Expires=Wed, 21 Oct 2015 07:28:00 GMT",
expected
);
unexpected.set_domain("foo.com");
let bad_expires = strptime(time_str, "%a, %d %b %Y %H:%S:%M %Z").unwrap();
expected.set_expires(bad_expires);
assert_ne_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=foo.com; Expires=Wed, 21 Oct 2015 07:28:00 GMT",
unexpected
);
}
#[test]
fn odd_characters() {
let expected = Cookie::new("foo", "b%2Fr");
assert_eq_parse!("foo=b%2Fr", expected);
}
#[test]
fn odd_characters_encoded() {
let expected = Cookie::new("foo", "b/r");
let cookie = match Cookie::parse_encoded("foo=b%2Fr") {
Ok(cookie) => cookie,
Err(e) => panic!("Failed to parse: {:?}", e),
};
assert_eq!(cookie, expected);
}
#[test]
fn do_not_panic_on_large_max_ages() {
let max_seconds = Duration::max_value().num_seconds();
let expected = Cookie::build("foo", "bar").max_age(max_seconds).finish();
assert_eq_parse!(format!(" foo=bar; Max-Age={:?}", max_seconds + 1), expected);
}
}


@ -0,0 +1,190 @@
use ring::hkdf::{Algorithm, KeyType, Prk, HKDF_SHA256};
use ring::rand::{SecureRandom, SystemRandom};
use super::private::KEY_LEN as PRIVATE_KEY_LEN;
use super::signed::KEY_LEN as SIGNED_KEY_LEN;
static HKDF_DIGEST: Algorithm = HKDF_SHA256;
const KEYS_INFO: &[&[u8]] = &[b"COOKIE;SIGNED:HMAC-SHA256;PRIVATE:AEAD-AES-256-GCM"];
/// A cryptographic master key for use with `Signed` and/or `Private` jars.
///
/// This structure encapsulates secure, cryptographic keys for use with both
/// [PrivateJar](struct.PrivateJar.html) and [SignedJar](struct.SignedJar.html).
/// It can be derived from a single master key via
/// [from_master](#method.from_master) or generated from a secure random source
/// via [generate](#method.generate). A single instance of `Key` can be used for
/// both a `PrivateJar` and a `SignedJar`.
///
/// This type is only available when the `secure-cookies` feature is enabled.
#[derive(Clone)]
pub struct Key {
signing_key: [u8; SIGNED_KEY_LEN],
encryption_key: [u8; PRIVATE_KEY_LEN],
}
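// Implementing `KeyType` for `&Key` lets the HKDF expansion in `from_master`
// produce exactly `SIGNED_KEY_LEN + PRIVATE_KEY_LEN` output bytes in a single
// `expand` call.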
impl KeyType for &Key {
#[inline]
fn len(&self) -> usize {
SIGNED_KEY_LEN + PRIVATE_KEY_LEN
}
}
impl Key {
/// Derives new signing/encryption keys from a master key.
///
/// The master key must be at least 256-bits (32 bytes). For security, the
/// master key _must_ be cryptographically random. The keys are derived
/// deterministically from the master key.
///
/// # Panics
///
/// Panics if `key` is less than 32 bytes in length.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Key;
///
/// # /*
/// let master_key = { /* a cryptographically random key >= 32 bytes */ };
/// # */
/// # let master_key: &Vec<u8> = &(0..32).collect();
///
/// let key = Key::from_master(master_key);
/// ```
pub fn from_master(key: &[u8]) -> Key {
if key.len() < 32 {
panic!(
"bad master key length: expected at least 32 bytes, found {}",
key.len()
);
}
// An empty `Key` structure; will be filled in with HKDF derived keys.
let mut output_key = Key {
signing_key: [0; SIGNED_KEY_LEN],
encryption_key: [0; PRIVATE_KEY_LEN],
};
// Expand the master key into two HKDF generated keys.
let mut both_keys = [0; SIGNED_KEY_LEN + PRIVATE_KEY_LEN];
let prk = Prk::new_less_safe(HKDF_DIGEST, key);
let okm = prk.expand(KEYS_INFO, &output_key).expect("okm expand");
okm.fill(&mut both_keys).expect("fill keys");
// Copy the key parts into their respective fields.
output_key
.signing_key
.copy_from_slice(&both_keys[..SIGNED_KEY_LEN]);
output_key
.encryption_key
.copy_from_slice(&both_keys[SIGNED_KEY_LEN..]);
output_key
}
/// Generates signing/encryption keys from a secure, random source. Keys are
/// generated nondeterministically.
///
/// # Panics
///
/// Panics if randomness cannot be retrieved from the operating system. See
/// [try_generate](#method.try_generate) for a non-panicking version.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Key;
///
/// let key = Key::generate();
/// ```
pub fn generate() -> Key {
Self::try_generate().expect("failed to generate `Key` from randomness")
}
/// Attempts to generate signing/encryption keys from a secure, random
/// source. Keys are generated nondeterministically. If randomness cannot be
/// retrieved from the underlying operating system, returns `None`.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Key;
///
/// let key = Key::try_generate();
/// ```
pub fn try_generate() -> Option<Key> {
let mut sign_key = [0; SIGNED_KEY_LEN];
let mut enc_key = [0; PRIVATE_KEY_LEN];
let rng = SystemRandom::new();
if rng.fill(&mut sign_key).is_err() || rng.fill(&mut enc_key).is_err() {
return None;
}
Some(Key {
signing_key: sign_key,
encryption_key: enc_key,
})
}
/// Returns the raw bytes of a key suitable for signing cookies.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Key;
///
/// let key = Key::generate();
/// let signing_key = key.signing();
/// ```
pub fn signing(&self) -> &[u8] {
&self.signing_key[..]
}
/// Returns the raw bytes of a key suitable for encrypting cookies.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Key;
///
/// let key = Key::generate();
/// let encryption_key = key.encryption();
/// ```
pub fn encryption(&self) -> &[u8] {
&self.encryption_key[..]
}
}
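// Illustrative check (not part of the original source): both halves of a
// generated key match the 32-byte sizes expected by HMAC-SHA256 signing and
// AES-256-GCM encryption.
#[cfg(test)]
#[test]
fn key_lengths_sketch() {
    let key = Key::generate();
    assert_eq!(key.signing().len(), SIGNED_KEY_LEN);
    assert_eq!(key.encryption().len(), PRIVATE_KEY_LEN);
}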
#[cfg(test)]
mod test {
use super::Key;
#[test]
fn deterministic_from_master() {
let master_key: Vec<u8> = (0..32).collect();
let key_a = Key::from_master(&master_key);
let key_b = Key::from_master(&master_key);
assert_eq!(key_a.signing(), key_b.signing());
assert_eq!(key_a.encryption(), key_b.encryption());
assert_ne!(key_a.encryption(), key_a.signing());
let master_key_2: Vec<u8> = (32..64).collect();
let key_2 = Key::from_master(&master_key_2);
assert_ne!(key_2.signing(), key_a.signing());
assert_ne!(key_2.encryption(), key_a.encryption());
}
#[test]
fn non_deterministic_generate() {
let key_a = Key::generate();
let key_b = Key::generate();
assert_ne!(key_a.signing(), key_b.signing());
assert_ne!(key_a.encryption(), key_b.encryption());
}
}


@ -0,0 +1,40 @@
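// Shared test macros for the signed and private child jars: `$clear` is the
// plain parent `CookieJar` and `$secure` is a child jar built over it. They
// check that changes made through the child are mirrored in the parent, that
// the parent never stores the plain value verbatim, and that tampering with
// the raw value makes the child reject it.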
#[cfg(test)]
macro_rules! assert_simple_behaviour {
($clear:expr, $secure:expr) => {{
assert_eq!($clear.iter().count(), 0);
$secure.add(Cookie::new("name", "val"));
assert_eq!($clear.iter().count(), 1);
assert_eq!($secure.get("name").unwrap().value(), "val");
assert_ne!($clear.get("name").unwrap().value(), "val");
$secure.add(Cookie::new("another", "two"));
assert_eq!($clear.iter().count(), 2);
$clear.remove(Cookie::named("another"));
assert_eq!($clear.iter().count(), 1);
$secure.remove(Cookie::named("name"));
assert_eq!($clear.iter().count(), 0);
}};
}
#[cfg(test)]
macro_rules! assert_secure_behaviour {
($clear:expr, $secure:expr) => {{
$secure.add(Cookie::new("secure", "secure"));
assert!($clear.get("secure").unwrap().value() != "secure");
assert!($secure.get("secure").unwrap().value() == "secure");
let mut cookie = $clear.get("secure").unwrap().clone();
let new_val = format!("{}l", cookie.value());
cookie.set_value(new_val);
$clear.add(cookie);
assert!($secure.get("secure").is_none());
let mut cookie = $clear.get("secure").unwrap().clone();
cookie.set_value("foobar");
$clear.add(cookie);
assert!($secure.get("secure").is_none());
}};
}


@ -0,0 +1,10 @@
//! Fork of https://github.com/alexcrichton/cookie-rs
#[macro_use]
mod macros;
mod key;
mod private;
mod signed;
pub use self::key::*;
pub use self::private::*;
pub use self::signed::*;


@ -0,0 +1,275 @@
use std::str;
use log::warn;
use ring::aead::{Aad, Algorithm, Nonce, AES_256_GCM};
use ring::aead::{LessSafeKey, UnboundKey};
use ring::rand::{SecureRandom, SystemRandom};
use super::Key;
use crate::cookie::{Cookie, CookieJar};
// Keep these in sync, and keep the key len synced with the `private` docs as
// well as the `KEYS_INFO` const in secure::Key.
static ALGO: &'static Algorithm = &AES_256_GCM;
const NONCE_LEN: usize = 12;
pub const KEY_LEN: usize = 32;
/// A child cookie jar that provides authenticated encryption for its cookies.
///
/// A _private_ child jar signs and encrypts all the cookies added to it and
/// verifies and decrypts cookies retrieved from it. Any cookies stored in a
/// `PrivateJar` are simultaneously assured confidentiality, integrity, and
/// authenticity. In other words, clients cannot discover nor tamper with the
/// contents of a cookie, nor can they fabricate cookie data.
///
/// This type is only available when the `secure-cookies` feature is enabled.
pub struct PrivateJar<'a> {
parent: &'a mut CookieJar,
key: [u8; KEY_LEN],
}
impl<'a> PrivateJar<'a> {
/// Creates a new child `PrivateJar` with parent `parent` and key `key`.
/// This method is typically called indirectly via the `private` method of
/// `CookieJar`.
#[doc(hidden)]
pub fn new(parent: &'a mut CookieJar, key: &Key) -> PrivateJar<'a> {
let mut key_array = [0u8; KEY_LEN];
key_array.copy_from_slice(key.encryption());
PrivateJar {
parent,
key: key_array,
}
}
/// Given a sealed value `value` and a key name `name`, where the nonce is
/// prepended to the encrypted value and the whole buffer is Base64 encoded,
/// verifies and decrypts the sealed value and returns it. If there's a
/// problem, returns an `Err` with a string describing the issue.
fn unseal(&self, name: &str, value: &str) -> Result<String, &'static str> {
let mut data = base64::decode(value).map_err(|_| "bad base64 value")?;
if data.len() <= NONCE_LEN {
return Err("length of decoded data is <= NONCE_LEN");
}
let ad = Aad::from(name.as_bytes());
let key = LessSafeKey::new(
UnboundKey::new(&ALGO, &self.key).expect("matching key length"),
);
let (nonce, mut sealed) = data.split_at_mut(NONCE_LEN);
let nonce =
Nonce::try_assume_unique_for_key(nonce).expect("invalid length of `nonce`");
let unsealed = key
.open_in_place(nonce, ad, &mut sealed)
.map_err(|_| "invalid key/nonce/value: bad seal")?;
if let Ok(unsealed_utf8) = str::from_utf8(unsealed) {
Ok(unsealed_utf8.to_string())
} else {
warn!(
"Private cookie does not have utf8 content!
It is likely the secret key used to encrypt it has been leaked.
Please change it as soon as possible."
);
Err("bad unsealed utf8")
}
}
/// Returns a reference to the `Cookie` inside this jar with the name `name`
/// and authenticates and decrypts the cookie's value, returning a `Cookie`
/// with the decrypted value. If the cookie cannot be found, or the cookie
/// fails to authenticate or decrypt, `None` is returned.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// let mut private_jar = jar.private(&key);
/// assert!(private_jar.get("name").is_none());
///
/// private_jar.add(Cookie::new("name", "value"));
/// assert_eq!(private_jar.get("name").unwrap().value(), "value");
/// ```
pub fn get(&self, name: &str) -> Option<Cookie<'static>> {
if let Some(cookie_ref) = self.parent.get(name) {
let mut cookie = cookie_ref.clone();
if let Ok(value) = self.unseal(name, cookie.value()) {
cookie.set_value(value);
return Some(cookie);
}
}
None
}
/// Adds `cookie` to the parent jar. The cookie's value is encrypted with
/// authenticated encryption assuring confidentiality, integrity, and
/// authenticity.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// jar.private(&key).add(Cookie::new("name", "value"));
///
/// assert_ne!(jar.get("name").unwrap().value(), "value");
/// assert_eq!(jar.private(&key).get("name").unwrap().value(), "value");
/// ```
pub fn add(&mut self, mut cookie: Cookie<'static>) {
self.encrypt_cookie(&mut cookie);
// Add the sealed cookie to the parent.
self.parent.add(cookie);
}
/// Adds an "original" `cookie` to parent jar. The cookie's value is
/// encrypted with authenticated encryption assuring confidentiality,
/// integrity, and authenticity. Adding an original cookie does not affect
/// the [`CookieJar::delta()`](struct.CookieJar.html#method.delta)
/// computation. This method is intended to be used to seed the cookie jar
/// with cookies received from a client's HTTP message.
///
/// For accurate `delta` computations, this method should not be called
/// after calling `remove`.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// jar.private(&key).add_original(Cookie::new("name", "value"));
///
/// assert_eq!(jar.iter().count(), 1);
/// assert_eq!(jar.delta().count(), 0);
/// ```
pub fn add_original(&mut self, mut cookie: Cookie<'static>) {
self.encrypt_cookie(&mut cookie);
// Add the sealed cookie to the parent.
self.parent.add_original(cookie);
}
/// Encrypts the cookie's value with
/// authenticated encryption assuring confidentiality, integrity, and authenticity.
fn encrypt_cookie(&self, cookie: &mut Cookie) {
let name = cookie.name().as_bytes();
let value = cookie.value().as_bytes();
let data = encrypt_name_value(name, value, &self.key);
// Base64 encode the nonce and encrypted value.
let sealed_value = base64::encode(&data);
cookie.set_value(sealed_value);
}
/// Removes `cookie` from the parent jar.
///
/// For correct removal, the passed in `cookie` must contain the same `path`
/// and `domain` as the cookie that was initially set.
///
/// See [CookieJar::remove](struct.CookieJar.html#method.remove) for more
/// details.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// let mut private_jar = jar.private(&key);
///
/// private_jar.add(Cookie::new("name", "value"));
/// assert!(private_jar.get("name").is_some());
///
/// private_jar.remove(Cookie::named("name"));
/// assert!(private_jar.get("name").is_none());
/// ```
pub fn remove(&mut self, cookie: Cookie<'static>) {
self.parent.remove(cookie);
}
}
fn encrypt_name_value(name: &[u8], value: &[u8], key: &[u8]) -> Vec<u8> {
// Create the `SealingKey` structure.
let unbound = UnboundKey::new(&ALGO, key).expect("matching key length");
let key = LessSafeKey::new(unbound);
// Create a vec to hold the [nonce | cookie value | overhead].
let mut data = vec![0; NONCE_LEN + value.len() + ALGO.tag_len()];
// Randomly generate the nonce, then copy the cookie value as input.
let (nonce, in_out) = data.split_at_mut(NONCE_LEN);
let (in_out, tag) = in_out.split_at_mut(value.len());
in_out.copy_from_slice(value);
// Randomly generate the nonce into the nonce piece.
SystemRandom::new()
.fill(nonce)
.expect("couldn't random fill nonce");
let nonce = Nonce::try_assume_unique_for_key(nonce).expect("invalid `nonce` length");
// Use cookie's name as associated data to prevent value swapping.
let ad = Aad::from(name);
let ad_tag = key
.seal_in_place_separate_tag(nonce, ad, in_out)
.expect("in-place seal");
// Copy the tag into the tag piece.
tag.copy_from_slice(ad_tag.as_ref());
// Remove the overhead and return the sealed content.
data
}
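// Illustrative test (not part of the original source): the sealed buffer is
// laid out as [ nonce | ciphertext | tag ], so its length is the plaintext
// length plus the nonce and AEAD tag overhead.
#[cfg(test)]
#[test]
fn sealed_layout_sketch() {
    let key = Key::generate();
    let sealed = encrypt_name_value(b"name", b"value", key.encryption());
    assert_eq!(sealed.len(), NONCE_LEN + b"value".len() + ALGO.tag_len());
}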
#[cfg(test)]
mod test {
use super::{encrypt_name_value, Cookie, CookieJar, Key};
#[test]
fn simple() {
let key = Key::generate();
let mut jar = CookieJar::new();
assert_simple_behaviour!(jar, jar.private(&key));
}
#[test]
fn private() {
let key = Key::generate();
let mut jar = CookieJar::new();
assert_secure_behaviour!(jar, jar.private(&key));
}
#[test]
fn non_utf8() {
let key = Key::generate();
let mut jar = CookieJar::new();
let name = "malicious";
let mut assert_non_utf8 = |value: &[u8]| {
let sealed = encrypt_name_value(name.as_bytes(), value, &key.encryption());
let encoded = base64::encode(&sealed);
assert_eq!(
jar.private(&key).unseal(name, &encoded),
Err("bad unsealed utf8")
);
jar.add(Cookie::new(name, encoded));
assert_eq!(jar.private(&key).get(name), None);
};
assert_non_utf8(&[0x72, 0xfb, 0xdf, 0x74]); // "rûßt" in ISO/IEC 8859-1
let mut malicious =
String::from(r#"{"id":"abc123??%X","admin":true}"#).into_bytes();
malicious[8] |= 0b1100_0000;
malicious[9] |= 0b1100_0000;
assert_non_utf8(&malicious);
}
}


@ -0,0 +1,184 @@
use ring::hmac::{self, sign, verify};
use super::Key;
use crate::cookie::{Cookie, CookieJar};
// Keep these in sync, and keep the key len synced with the `signed` docs as
// well as the `KEYS_INFO` const in secure::Key.
static HMAC_DIGEST: hmac::Algorithm = hmac::HMAC_SHA256;
const BASE64_DIGEST_LEN: usize = 44;
pub const KEY_LEN: usize = 32;
/// A child cookie jar that authenticates its cookies.
///
/// A _signed_ child jar signs all the cookies added to it and verifies cookies
/// retrieved from it. Any cookies stored in a `SignedJar` are assured integrity
/// and authenticity. In other words, clients cannot tamper with the contents of
/// a cookie nor can they fabricate cookie values, but the data is visible in
/// plaintext.
///
/// This type is only available when the `secure-cookies` feature is enabled.
pub struct SignedJar<'a> {
parent: &'a mut CookieJar,
key: hmac::Key,
}
impl<'a> SignedJar<'a> {
/// Creates a new child `SignedJar` with parent `parent` and key `key`. This
/// method is typically called indirectly via the `signed` method of
/// `CookieJar`.
#[doc(hidden)]
pub fn new(parent: &'a mut CookieJar, key: &Key) -> SignedJar<'a> {
SignedJar {
parent,
key: hmac::Key::new(HMAC_DIGEST, key.signing()),
}
}
/// Given a signed value `cookie_value` where the Base64-encoded signature is
/// prepended to the plaintext value, verifies the signature and returns the
/// plaintext. If there's a problem, returns an `Err` with a string describing
/// the issue.
fn verify(&self, cookie_value: &str) -> Result<String, &'static str> {
if cookie_value.len() < BASE64_DIGEST_LEN {
return Err("length of value is <= BASE64_DIGEST_LEN");
}
let (digest_str, value) = cookie_value.split_at(BASE64_DIGEST_LEN);
let sig = base64::decode(digest_str).map_err(|_| "bad base64 digest")?;
verify(&self.key, value.as_bytes(), &sig)
.map(|_| value.to_string())
.map_err(|_| "value did not verify")
}
/// Returns a reference to the `Cookie` inside this jar with the name `name`
/// and verifies the authenticity and integrity of the cookie's value,
/// returning a `Cookie` with the authenticated value. If the cookie cannot
/// be found, or the cookie fails to verify, `None` is returned.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// let mut signed_jar = jar.signed(&key);
/// assert!(signed_jar.get("name").is_none());
///
/// signed_jar.add(Cookie::new("name", "value"));
/// assert_eq!(signed_jar.get("name").unwrap().value(), "value");
/// ```
pub fn get(&self, name: &str) -> Option<Cookie<'static>> {
if let Some(cookie_ref) = self.parent.get(name) {
let mut cookie = cookie_ref.clone();
if let Ok(value) = self.verify(cookie.value()) {
cookie.set_value(value);
return Some(cookie);
}
}
None
}
/// Adds `cookie` to the parent jar. The cookie's value is signed assuring
/// integrity and authenticity.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// jar.signed(&key).add(Cookie::new("name", "value"));
///
/// assert_ne!(jar.get("name").unwrap().value(), "value");
/// assert!(jar.get("name").unwrap().value().contains("value"));
/// assert_eq!(jar.signed(&key).get("name").unwrap().value(), "value");
/// ```
pub fn add(&mut self, mut cookie: Cookie<'static>) {
self.sign_cookie(&mut cookie);
self.parent.add(cookie);
}
/// Adds an "original" `cookie` to this jar. The cookie's value is signed
/// assuring integrity and authenticity. Adding an original cookie does not
/// affect the [`CookieJar::delta()`](struct.CookieJar.html#method.delta)
/// computation. This method is intended to be used to seed the cookie jar
/// with cookies received from a client's HTTP message.
///
/// For accurate `delta` computations, this method should not be called
/// after calling `remove`.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// jar.signed(&key).add_original(Cookie::new("name", "value"));
///
/// assert_eq!(jar.iter().count(), 1);
/// assert_eq!(jar.delta().count(), 0);
/// ```
pub fn add_original(&mut self, mut cookie: Cookie<'static>) {
self.sign_cookie(&mut cookie);
self.parent.add_original(cookie);
}
/// Signs the cookie's value assuring integrity and authenticity.
fn sign_cookie(&self, cookie: &mut Cookie) {
let digest = sign(&self.key, cookie.value().as_bytes());
let mut new_value = base64::encode(digest.as_ref());
new_value.push_str(cookie.value());
cookie.set_value(new_value);
}
/// Removes `cookie` from the parent jar.
///
/// For correct removal, the passed in `cookie` must contain the same `path`
/// and `domain` as the cookie that was initially set.
///
/// See [CookieJar::remove](struct.CookieJar.html#method.remove) for more
/// details.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// let mut signed_jar = jar.signed(&key);
///
/// signed_jar.add(Cookie::new("name", "value"));
/// assert!(signed_jar.get("name").is_some());
///
/// signed_jar.remove(Cookie::named("name"));
/// assert!(signed_jar.get("name").is_none());
/// ```
pub fn remove(&mut self, cookie: Cookie<'static>) {
self.parent.remove(cookie);
}
}
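// Illustrative test (not part of the original source): a signed value is the
// base64-encoded HMAC-SHA256 digest (`BASE64_DIGEST_LEN` characters) followed
// by the plaintext, which is why `verify` can split it at a fixed offset.
#[cfg(test)]
#[test]
fn signed_value_layout_sketch() {
    let key = Key::generate();
    let mut jar = CookieJar::new();
    jar.signed(&key).add(Cookie::new("name", "value"));
    let raw = jar.get("name").unwrap().value();
    assert_eq!(raw.len(), BASE64_DIGEST_LEN + "value".len());
    assert!(raw.ends_with("value"));
}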
#[cfg(test)]
mod test {
use super::{Cookie, CookieJar, Key};
#[test]
fn simple() {
let key = Key::generate();
let mut jar = CookieJar::new();
assert_simple_behaviour!(jar, jar.signed(&key));
}
#[test]
fn private() {
let key = Key::generate();
let mut jar = CookieJar::new();
assert_secure_behaviour!(jar, jar.signed(&key));
}
}


@ -0,0 +1,241 @@
use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_threadpool::{run, CpuFuture};
#[cfg(feature = "brotli")]
use brotli2::write::BrotliDecoder;
use bytes::Bytes;
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
use flate2::write::{GzDecoder, ZlibDecoder};
use futures::{ready, Stream};
use super::Writer;
use crate::error::PayloadError;
use crate::http::header::{ContentEncoding, HeaderMap, CONTENT_ENCODING};
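// Chunks smaller than this are decompressed on the current task; larger
// chunks are handed off to the blocking thread pool (see `poll_next`).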
const INPLACE: usize = 2049;
pub struct Decoder<S> {
decoder: Option<ContentDecoder>,
stream: S,
eof: bool,
fut: Option<CpuFuture<Result<(Option<Bytes>, ContentDecoder), io::Error>>>,
}
impl<S> Decoder<S>
where
S: Stream<Item = Result<Bytes, PayloadError>>,
{
/// Construct a decoder.
#[inline]
pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> {
let decoder = match encoding {
#[cfg(feature = "brotli")]
ContentEncoding::Br => Some(ContentDecoder::Br(Box::new(
BrotliDecoder::new(Writer::new()),
))),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
ZlibDecoder::new(Writer::new()),
))),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(
GzDecoder::new(Writer::new()),
))),
_ => None,
};
Decoder {
decoder,
stream,
fut: None,
eof: false,
}
}
/// Construct a decoder based on the `Content-Encoding` header.
#[inline]
pub fn from_headers(stream: S, headers: &HeaderMap) -> Decoder<S> {
// check content-encoding
let encoding = if let Some(enc) = headers.get(&CONTENT_ENCODING) {
if let Ok(enc) = enc.to_str() {
ContentEncoding::from(enc)
} else {
ContentEncoding::Identity
}
} else {
ContentEncoding::Identity
};
Self::new(stream, encoding)
}
}
impl<S> Stream for Decoder<S>
where
S: Stream<Item = Result<Bytes, PayloadError>> + Unpin,
{
type Item = Result<Bytes, PayloadError>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context,
) -> Poll<Option<Self::Item>> {
loop {
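// Each pass: finish any in-flight thread-pool decode first, then pull the
// next upstream chunk; chunks below INPLACE are decoded inline, larger
// ones are offloaded to the blocking pool.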
if let Some(ref mut fut) = self.fut {
let (chunk, decoder) = match ready!(Pin::new(fut).poll(cx)) {
Ok(Ok(item)) => item,
Ok(Err(e)) => return Poll::Ready(Some(Err(e.into()))),
Err(e) => return Poll::Ready(Some(Err(e.into()))),
};
self.decoder = Some(decoder);
self.fut.take();
if let Some(chunk) = chunk {
return Poll::Ready(Some(Ok(chunk)));
}
}
if self.eof {
return Poll::Ready(None);
}
match Pin::new(&mut self.stream).poll_next(cx) {
Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err))),
Poll::Ready(Some(Ok(chunk))) => {
if let Some(mut decoder) = self.decoder.take() {
if chunk.len() < INPLACE {
let chunk = decoder.feed_data(chunk)?;
self.decoder = Some(decoder);
if let Some(chunk) = chunk {
return Poll::Ready(Some(Ok(chunk)));
}
} else {
self.fut = Some(run(move || {
let chunk = decoder.feed_data(chunk)?;
Ok((chunk, decoder))
}));
}
continue;
} else {
return Poll::Ready(Some(Ok(chunk)));
}
}
Poll::Ready(None) => {
self.eof = true;
return if let Some(mut decoder) = self.decoder.take() {
match decoder.feed_eof() {
Ok(Some(res)) => Poll::Ready(Some(Ok(res))),
Ok(None) => Poll::Ready(None),
Err(err) => Poll::Ready(Some(Err(err.into()))),
}
} else {
Poll::Ready(None)
};
}
Poll::Pending => break,
}
}
Poll::Pending
}
}
enum ContentDecoder {
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
Deflate(Box<ZlibDecoder<Writer>>),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
Gzip(Box<GzDecoder<Writer>>),
#[cfg(feature = "brotli")]
Br(Box<BrotliDecoder<Writer>>),
}
impl ContentDecoder {
#[allow(unreachable_patterns)]
fn feed_eof(&mut self) -> io::Result<Option<Bytes>> {
match self {
#[cfg(feature = "brotli")]
ContentDecoder::Br(ref mut decoder) => match decoder.finish() {
Ok(mut writer) => {
let b = writer.take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() {
Ok(_) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() {
Ok(_) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
_ => Ok(None),
}
}
#[allow(unreachable_patterns)]
fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> {
match self {
#[cfg(feature = "brotli")]
ContentDecoder::Br(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentDecoder::Deflate(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
_ => Ok(Some(data)),
}
}
}


@ -0,0 +1,266 @@
//! Stream encoder
use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_threadpool::{run, CpuFuture};
#[cfg(feature = "brotli")]
use brotli2::write::BrotliEncoder;
use bytes::Bytes;
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
use flate2::write::{GzEncoder, ZlibEncoder};
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use crate::http::header::{ContentEncoding, CONTENT_ENCODING};
use crate::http::{HeaderValue, HttpTryFrom, StatusCode};
use crate::{Error, ResponseHead};
use super::Writer;
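// Chunks smaller than this are compressed on the current task; larger
// chunks are handed off to the blocking thread pool (see `poll_next`).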
const INPLACE: usize = 2049;
pub struct Encoder<B> {
eof: bool,
body: EncoderBody<B>,
encoder: Option<ContentEncoder>,
fut: Option<CpuFuture<Result<ContentEncoder, io::Error>>>,
}
impl<B: MessageBody> Encoder<B> {
pub fn response(
encoding: ContentEncoding,
head: &mut ResponseHead,
body: ResponseBody<B>,
) -> ResponseBody<Encoder<B>> {
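// Skip compression when a Content-Encoding is already set, when the status
// has no body to compress (101 Switching Protocols, 204 No Content), or
// when the requested encoding is Identity or Auto.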
let can_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
|| head.status == StatusCode::SWITCHING_PROTOCOLS
|| head.status == StatusCode::NO_CONTENT
|| encoding == ContentEncoding::Identity
|| encoding == ContentEncoding::Auto);
let body = match body {
ResponseBody::Other(b) => match b {
Body::None => return ResponseBody::Other(Body::None),
Body::Empty => return ResponseBody::Other(Body::Empty),
Body::Bytes(buf) => {
if can_encode {
EncoderBody::Bytes(buf)
} else {
return ResponseBody::Other(Body::Bytes(buf));
}
}
Body::Message(stream) => EncoderBody::BoxedStream(stream),
},
ResponseBody::Body(stream) => EncoderBody::Stream(stream),
};
if can_encode {
// Modify response body only if encoder is not None
if let Some(enc) = ContentEncoder::encoder(encoding) {
update_head(encoding, head);
head.no_chunking(false);
return ResponseBody::Body(Encoder {
body,
eof: false,
fut: None,
encoder: Some(enc),
});
}
}
ResponseBody::Body(Encoder {
body,
eof: false,
fut: None,
encoder: None,
})
}
}
enum EncoderBody<B> {
Bytes(Bytes),
Stream(B),
BoxedStream(Box<dyn MessageBody>),
}
impl<B: MessageBody> MessageBody for Encoder<B> {
fn size(&self) -> BodySize {
if self.encoder.is_none() {
match self.body {
EncoderBody::Bytes(ref b) => b.size(),
EncoderBody::Stream(ref b) => b.size(),
EncoderBody::BoxedStream(ref b) => b.size(),
}
} else {
BodySize::Stream
}
}
fn poll_next(&mut self, cx: &mut Context) -> Poll<Option<Result<Bytes, Error>>> {
loop {
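// Each pass: finish any in-flight thread-pool compression first, then pull
// the next body chunk; chunks below INPLACE are compressed inline, larger
// ones are offloaded to the blocking pool.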
if self.eof {
return Poll::Ready(None);
}
if let Some(ref mut fut) = self.fut {
let mut encoder = match futures::ready!(Pin::new(fut).poll(cx)) {
Ok(Ok(item)) => item,
Ok(Err(e)) => return Poll::Ready(Some(Err(e.into()))),
Err(e) => return Poll::Ready(Some(Err(e.into()))),
};
let chunk = encoder.take();
self.encoder = Some(encoder);
self.fut.take();
if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk)));
}
}
let result = match self.body {
EncoderBody::Bytes(ref mut b) => {
if b.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(std::mem::replace(b, Bytes::new()))))
}
}
EncoderBody::Stream(ref mut b) => b.poll_next(cx),
EncoderBody::BoxedStream(ref mut b) => b.poll_next(cx),
};
match result {
Poll::Ready(Some(Ok(chunk))) => {
if let Some(mut encoder) = self.encoder.take() {
if chunk.len() < INPLACE {
encoder.write(&chunk)?;
let chunk = encoder.take();
self.encoder = Some(encoder);
if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk)));
}
} else {
self.fut = Some(run(move || {
encoder.write(&chunk)?;
Ok(encoder)
}));
}
} else {
return Poll::Ready(Some(Ok(chunk)));
}
}
Poll::Ready(None) => {
if let Some(encoder) = self.encoder.take() {
let chunk = encoder.finish()?;
if chunk.is_empty() {
return Poll::Ready(None);
} else {
self.eof = true;
return Poll::Ready(Some(Ok(chunk)));
}
} else {
return Poll::Ready(None);
}
}
val => return val,
}
}
}
}
fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
head.headers_mut().insert(
CONTENT_ENCODING,
HeaderValue::try_from(Bytes::from_static(encoding.as_str().as_bytes())).unwrap(),
);
}
enum ContentEncoder {
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
Deflate(ZlibEncoder<Writer>),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
Gzip(GzEncoder<Writer>),
#[cfg(feature = "brotli")]
Br(BrotliEncoder<Writer>),
}
impl ContentEncoder {
fn encoder(encoding: ContentEncoding) -> Option<Self> {
match encoding {
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoding::Deflate => Some(ContentEncoder::Deflate(ZlibEncoder::new(
Writer::new(),
flate2::Compression::fast(),
))),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoding::Gzip => Some(ContentEncoder::Gzip(GzEncoder::new(
Writer::new(),
flate2::Compression::fast(),
))),
#[cfg(feature = "brotli")]
ContentEncoding::Br => {
Some(ContentEncoder::Br(BrotliEncoder::new(Writer::new(), 3)))
}
_ => None,
}
}
#[inline]
pub(crate) fn take(&mut self) -> Bytes {
match *self {
#[cfg(feature = "brotli")]
ContentEncoder::Br(ref mut encoder) => encoder.get_mut().take(),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(),
}
}
fn finish(self) -> Result<Bytes, io::Error> {
match self {
#[cfg(feature = "brotli")]
ContentEncoder::Br(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Gzip(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Deflate(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
}
}
fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
match *self {
#[cfg(feature = "brotli")]
ContentEncoder::Br(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding br encoding: {}", err);
Err(err)
}
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding gzip encoding: {}", err);
Err(err)
}
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Deflate(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding deflate encoding: {}", err);
Err(err)
}
},
}
}
}


@ -0,0 +1,35 @@
//! Content-Encoding support
use std::io;
use bytes::{Bytes, BytesMut};
mod decoder;
mod encoder;
pub use self::decoder::Decoder;
pub use self::encoder::Encoder;
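/// In-memory sink shared by the encoders and decoders: the (de)compression
/// streams write their output into `buf` and `take` hands it back out as
/// `Bytes`.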
pub(self) struct Writer {
buf: BytesMut,
}
impl Writer {
fn new() -> Writer {
Writer {
buf: BytesMut::with_capacity(8192),
}
}
fn take(&mut self) -> Bytes {
self.buf.take().freeze()
}
}
impl io::Write for Writer {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.buf.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}

1204 actix-http/src/error.rs Normal file

File diff suppressed because it is too large


@ -0,0 +1,91 @@
use std::any::{Any, TypeId};
use std::fmt;
use hashbrown::HashMap;
#[derive(Default)]
/// A type map of request extensions.
pub struct Extensions {
map: HashMap<TypeId, Box<dyn Any>>,
}
impl Extensions {
/// Create an empty `Extensions`.
#[inline]
pub fn new() -> Extensions {
Extensions {
map: HashMap::default(),
}
}
/// Insert a type into this `Extensions`.
///
/// If an extension of this type already exists, it will be replaced.
pub fn insert<T: 'static>(&mut self, val: T) {
self.map.insert(TypeId::of::<T>(), Box::new(val));
}
/// Check if the container has an entry of this type
pub fn contains<T: 'static>(&self) -> bool {
self.map.get(&TypeId::of::<T>()).is_some()
}
/// Get a reference to a type previously inserted on this `Extensions`.
pub fn get<T: 'static>(&self) -> Option<&T> {
self.map
.get(&TypeId::of::<T>())
.and_then(|boxed| (&**boxed as &(dyn Any + 'static)).downcast_ref())
}
/// Get a mutable reference to a type previously inserted on this `Extensions`.
pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
self.map
.get_mut(&TypeId::of::<T>())
.and_then(|boxed| (&mut **boxed as &mut (dyn Any + 'static)).downcast_mut())
}
/// Remove a type from this `Extensions`.
///
/// If an extension of this type existed, it will be returned.
pub fn remove<T: 'static>(&mut self) -> Option<T> {
self.map.remove(&TypeId::of::<T>()).and_then(|boxed| {
(boxed as Box<dyn Any + 'static>)
.downcast()
.ok()
.map(|boxed| *boxed)
})
}
/// Clear the `Extensions` of all inserted extensions.
#[inline]
pub fn clear(&mut self) {
self.map.clear();
}
}
impl fmt::Debug for Extensions {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Extensions").finish()
}
}
#[test]
fn test_extensions() {
#[derive(Debug, PartialEq)]
struct MyType(i32);
let mut extensions = Extensions::new();
extensions.insert(5i32);
extensions.insert(MyType(10));
assert_eq!(extensions.get(), Some(&5i32));
assert_eq!(extensions.get_mut(), Some(&mut 5i32));
assert_eq!(extensions.remove::<i32>(), Some(5i32));
assert!(extensions.get::<i32>().is_none());
assert_eq!(extensions.get::<bool>(), None);
assert_eq!(extensions.get(), Some(&MyType(10)));
}
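// A minimal sketch (not in the original file): the map is keyed by `TypeId`,
// so distinct newtype wrappers give independent slots for the same inner type.
// The test name and the newtypes are hypothetical.
#[test]
fn test_newtype_slots_sketch() {
    struct RequestId(u64);
    struct TraceId(u64);

    let mut ext = Extensions::new();
    ext.insert(RequestId(1));
    ext.insert(TraceId(2));
    assert_eq!(ext.get::<RequestId>().map(|v| v.0), Some(1));
    assert_eq!(ext.get::<TraceId>().map(|v| v.0), Some(2));
}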

251 actix-http/src/h1/client.rs Normal file

@ -0,0 +1,251 @@
#![allow(unused_imports, unused_variables, dead_code)]
use std::io::{self, Write};
use std::rc::Rc;
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags;
use bytes::{BufMut, Bytes, BytesMut};
use http::header::{
HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING, UPGRADE,
};
use http::{Method, Version};
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
use super::{decoder, encoder, reserve_readbuf};
use super::{Message, MessageType};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::error::{ParseError, PayloadError};
use crate::header::HeaderMap;
use crate::helpers;
use crate::message::{
ConnectionType, Head, MessagePool, RequestHead, RequestHeadType, ResponseHead,
};
bitflags! {
struct Flags: u8 {
const HEAD = 0b0000_0001;
const KEEPALIVE_ENABLED = 0b0000_1000;
const STREAM = 0b0001_0000;
}
}
const AVERAGE_HEADER_SIZE: usize = 30;
/// HTTP/1 Codec
pub struct ClientCodec {
inner: ClientCodecInner,
}
/// HTTP/1 Payload Codec
pub struct ClientPayloadCodec {
inner: ClientCodecInner,
}
struct ClientCodecInner {
config: ServiceConfig,
decoder: decoder::MessageDecoder<ResponseHead>,
payload: Option<PayloadDecoder>,
version: Version,
ctype: ConnectionType,
// encoder part
flags: Flags,
headers_size: u32,
encoder: encoder::MessageEncoder<RequestHeadType>,
}
impl Default for ClientCodec {
fn default() -> Self {
ClientCodec::new(ServiceConfig::default())
}
}
impl ClientCodec {
/// Create HTTP/1 codec.
///
/// `keepalive_enabled` controls how the response `connection` header is generated.
pub fn new(config: ServiceConfig) -> Self {
let flags = if config.keep_alive_enabled() {
Flags::KEEPALIVE_ENABLED
} else {
Flags::empty()
};
ClientCodec {
inner: ClientCodecInner {
config,
decoder: decoder::MessageDecoder::default(),
payload: None,
version: Version::HTTP_11,
ctype: ConnectionType::Close,
flags,
headers_size: 0,
encoder: encoder::MessageEncoder::default(),
},
}
}
/// Check if request is upgrade
pub fn upgrade(&self) -> bool {
self.inner.ctype == ConnectionType::Upgrade
}
/// Check if last response is keep-alive
pub fn keepalive(&self) -> bool {
self.inner.ctype == ConnectionType::KeepAlive
}
/// Check last request's message type
pub fn message_type(&self) -> MessageType {
if self.inner.flags.contains(Flags::STREAM) {
MessageType::Stream
} else if self.inner.payload.is_none() {
MessageType::None
} else {
MessageType::Payload
}
}
/// Convert message codec to a payload codec
pub fn into_payload_codec(self) -> ClientPayloadCodec {
ClientPayloadCodec { inner: self.inner }
}
}
impl ClientPayloadCodec {
/// Check if last response is keep-alive
pub fn keepalive(&self) -> bool {
self.inner.ctype == ConnectionType::KeepAlive
}
/// Transform payload codec to a message codec
pub fn into_message_codec(self) -> ClientCodec {
ClientCodec { inner: self.inner }
}
}
impl Decoder for ClientCodec {
type Item = ResponseHead;
type Error = ParseError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
debug_assert!(self.inner.payload.is_none(), "Payload decoder is set");
if let Some((req, payload)) = self.inner.decoder.decode(src)? {
if let Some(ctype) = req.ctype() {
// do not use peer's keep-alive
self.inner.ctype = if ctype == ConnectionType::KeepAlive {
self.inner.ctype
} else {
ctype
};
}
if !self.inner.flags.contains(Flags::HEAD) {
match payload {
PayloadType::None => self.inner.payload = None,
PayloadType::Payload(pl) => self.inner.payload = Some(pl),
PayloadType::Stream(pl) => {
self.inner.payload = Some(pl);
self.inner.flags.insert(Flags::STREAM);
}
}
} else {
self.inner.payload = None;
}
reserve_readbuf(src);
Ok(Some(req))
} else {
Ok(None)
}
}
}
impl Decoder for ClientPayloadCodec {
type Item = Option<Bytes>;
type Error = PayloadError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
debug_assert!(
self.inner.payload.is_some(),
"Payload decoder is not specified"
);
Ok(match self.inner.payload.as_mut().unwrap().decode(src)? {
Some(PayloadItem::Chunk(chunk)) => {
reserve_readbuf(src);
Some(Some(chunk))
}
Some(PayloadItem::Eof) => {
self.inner.payload.take();
Some(None)
}
None => None,
})
}
}
impl Encoder for ClientCodec {
type Item = Message<(RequestHeadType, BodySize)>;
type Error = io::Error;
fn encode(
&mut self,
item: Self::Item,
dst: &mut BytesMut,
) -> Result<(), Self::Error> {
match item {
Message::Item((mut head, length)) => {
let inner = &mut self.inner;
inner.version = head.as_ref().version;
inner
.flags
.set(Flags::HEAD, head.as_ref().method == Method::HEAD);
// connection status
inner.ctype = match head.as_ref().connection_type() {
ConnectionType::KeepAlive => {
if inner.flags.contains(Flags::KEEPALIVE_ENABLED) {
ConnectionType::KeepAlive
} else {
ConnectionType::Close
}
}
ConnectionType::Upgrade => ConnectionType::Upgrade,
ConnectionType::Close => ConnectionType::Close,
};
inner.encoder.encode(
dst,
&mut head,
false,
false,
inner.version,
length,
inner.ctype,
&inner.config,
)?;
}
Message::Chunk(Some(bytes)) => {
self.inner.encoder.encode_chunk(bytes.as_ref(), dst)?;
}
Message::Chunk(None) => {
self.inner.encoder.encode_eof(dst)?;
}
}
Ok(())
}
}
pub struct Writer<'a>(pub &'a mut BytesMut);
impl<'a> io::Write for Writer<'a> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
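// A minimal sketch (not part of the original file): serializing a bare request
// head with `ClientCodec`. Assumes `actix_codec::Encoder` is in scope; a
// default `RequestHead` is `GET / HTTP/1.1`. The function name is hypothetical.
fn encode_bare_get_sketch(dst: &mut BytesMut) -> io::Result<()> {
    let mut codec = ClientCodec::default();
    let head = RequestHead::default();
    codec.encode(
        Message::Item((RequestHeadType::Owned(head), BodySize::None)),
        dst,
    )
}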

253 actix-http/src/h1/codec.rs Normal file

@ -0,0 +1,253 @@
#![allow(unused_imports, unused_variables, dead_code)]
use std::io::Write;
use std::{fmt, io, net};
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags;
use bytes::{BufMut, Bytes, BytesMut};
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
use http::{Method, StatusCode, Version};
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
use super::{decoder, encoder};
use super::{Message, MessageType};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::error::ParseError;
use crate::helpers;
use crate::message::{ConnectionType, Head, ResponseHead};
use crate::request::Request;
use crate::response::Response;
bitflags! {
struct Flags: u8 {
const HEAD = 0b0000_0001;
const KEEPALIVE_ENABLED = 0b0000_0010;
const STREAM = 0b0000_0100;
}
}
const AVERAGE_HEADER_SIZE: usize = 30;
/// HTTP/1 Codec
pub struct Codec {
config: ServiceConfig,
decoder: decoder::MessageDecoder<Request>,
payload: Option<PayloadDecoder>,
version: Version,
ctype: ConnectionType,
// encoder part
flags: Flags,
encoder: encoder::MessageEncoder<Response<()>>,
}
impl Default for Codec {
fn default() -> Self {
Codec::new(ServiceConfig::default())
}
}
impl fmt::Debug for Codec {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "h1::Codec({:?})", self.flags)
}
}
impl Codec {
/// Create HTTP/1 codec.
///
/// `keepalive_enabled` controls how the response `connection` header is generated.
pub fn new(config: ServiceConfig) -> Self {
let flags = if config.keep_alive_enabled() {
Flags::KEEPALIVE_ENABLED
} else {
Flags::empty()
};
Codec {
config,
flags,
decoder: decoder::MessageDecoder::default(),
payload: None,
version: Version::HTTP_11,
ctype: ConnectionType::Close,
encoder: encoder::MessageEncoder::default(),
}
}
#[inline]
/// Check if request is upgrade
pub fn upgrade(&self) -> bool {
self.ctype == ConnectionType::Upgrade
}
#[inline]
/// Check if last response is keep-alive
pub fn keepalive(&self) -> bool {
self.ctype == ConnectionType::KeepAlive
}
#[inline]
/// Check if keep-alive enabled on server level
pub fn keepalive_enabled(&self) -> bool {
self.flags.contains(Flags::KEEPALIVE_ENABLED)
}
#[inline]
/// Check last request's message type
pub fn message_type(&self) -> MessageType {
if self.flags.contains(Flags::STREAM) {
MessageType::Stream
} else if self.payload.is_none() {
MessageType::None
} else {
MessageType::Payload
}
}
#[inline]
pub fn config(&self) -> &ServiceConfig {
&self.config
}
}
impl Decoder for Codec {
type Item = Message<Request>;
type Error = ParseError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if self.payload.is_some() {
Ok(match self.payload.as_mut().unwrap().decode(src)? {
Some(PayloadItem::Chunk(chunk)) => Some(Message::Chunk(Some(chunk))),
Some(PayloadItem::Eof) => {
self.payload.take();
Some(Message::Chunk(None))
}
None => None,
})
} else if let Some((req, payload)) = self.decoder.decode(src)? {
let head = req.head();
self.flags.set(Flags::HEAD, head.method == Method::HEAD);
self.version = head.version;
self.ctype = head.connection_type();
if self.ctype == ConnectionType::KeepAlive
&& !self.flags.contains(Flags::KEEPALIVE_ENABLED)
{
self.ctype = ConnectionType::Close
}
match payload {
PayloadType::None => self.payload = None,
PayloadType::Payload(pl) => self.payload = Some(pl),
PayloadType::Stream(pl) => {
self.payload = Some(pl);
self.flags.insert(Flags::STREAM);
}
}
Ok(Some(Message::Item(req)))
} else {
Ok(None)
}
}
}
impl Encoder for Codec {
type Item = Message<(Response<()>, BodySize)>;
type Error = io::Error;
fn encode(
&mut self,
item: Self::Item,
dst: &mut BytesMut,
) -> Result<(), Self::Error> {
match item {
Message::Item((mut res, length)) => {
// set response version
res.head_mut().version = self.version;
// connection status
self.ctype = if let Some(ct) = res.head().ctype() {
if ct == ConnectionType::KeepAlive {
self.ctype
} else {
ct
}
} else {
self.ctype
};
// encode message
let len = dst.len();
self.encoder.encode(
dst,
&mut res,
self.flags.contains(Flags::HEAD),
self.flags.contains(Flags::STREAM),
self.version,
length,
self.ctype,
&self.config,
)?;
// self.headers_size = (dst.len() - len) as u32;
}
Message::Chunk(Some(bytes)) => {
self.encoder.encode_chunk(bytes.as_ref(), dst)?;
}
Message::Chunk(None) => {
self.encoder.encode_eof(dst)?;
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::{cmp, io};
use actix_codec::{AsyncRead, AsyncWrite};
use bytes::{Buf, Bytes, BytesMut};
use http::{Method, Version};
use super::*;
use crate::error::ParseError;
use crate::h1::Message;
use crate::httpmessage::HttpMessage;
use crate::request::Request;
#[test]
fn test_http_request_chunked_payload_and_next_message() {
let mut codec = Codec::default();
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let item = codec.decode(&mut buf).unwrap().unwrap();
let req = item.message();
assert_eq!(req.method(), Method::GET);
assert!(req.chunked().unwrap());
buf.extend(
b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n\
POST /test2 HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n"
.iter(),
);
let msg = codec.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
let msg = codec.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"line");
let msg = codec.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
// decode next message
let item = codec.decode(&mut buf).unwrap().unwrap();
let req = item.message();
assert_eq!(*req.method(), Method::POST);
assert!(req.chunked().unwrap());
}
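// A minimal sketch (not in the original tests): the encoder side of the same
// codec, writing an empty 200 response followed by eof. Test name is hypothetical.
#[test]
fn test_encode_response_head_sketch() {
    let mut codec = Codec::default();
    let mut buf = BytesMut::new();
    let res: Response<()> = Response::Ok().finish().drop_body();
    codec
        .encode(Message::Item((res, BodySize::Empty)), &mut buf)
        .unwrap();
    codec.encode(Message::Chunk(None), &mut buf).unwrap();
    assert!(buf.starts_with(b"HTTP/1.1 200 OK"));
}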
}

1221 actix-http/src/h1/decoder.rs Normal file

File diff suppressed because it is too large


@ -0,0 +1,923 @@
use std::collections::VecDeque;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Instant;
use std::{fmt, io, net};
use actix_codec::{AsyncRead, Decoder, Encoder, Framed, FramedParts};
use actix_rt::time::{delay, Delay};
use actix_server_config::IoStream;
use actix_service::Service;
use bitflags::bitflags;
use bytes::{BufMut, BytesMut};
use log::{error, trace};
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use crate::cloneable::CloneableService;
use crate::config::ServiceConfig;
use crate::error::{DispatchError, Error};
use crate::error::{ParseError, PayloadError};
use crate::helpers::DataFactory;
use crate::httpmessage::HttpMessage;
use crate::request::Request;
use crate::response::Response;
use super::codec::Codec;
use super::payload::{Payload, PayloadSender, PayloadStatus};
use super::{Message, MessageType};
const LW_BUFFER_SIZE: usize = 4096;
const HW_BUFFER_SIZE: usize = 32_768;
const MAX_PIPELINED_MESSAGES: usize = 16;
bitflags! {
pub struct Flags: u8 {
const STARTED = 0b0000_0001;
const KEEPALIVE = 0b0000_0010;
const POLLED = 0b0000_0100;
const SHUTDOWN = 0b0000_1000;
const READ_DISCONNECT = 0b0001_0000;
const WRITE_DISCONNECT = 0b0010_0000;
const UPGRADE = 0b0100_0000;
}
}
/// Dispatcher for HTTP/1.1 protocol
pub struct Dispatcher<T, S, B, X, U>
where
S: Service<Request = Request>,
S::Error: Into<Error>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
inner: DispatcherState<T, S, B, X, U>,
}
enum DispatcherState<T, S, B, X, U>
where
S: Service<Request = Request>,
S::Error: Into<Error>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
Normal(InnerDispatcher<T, S, B, X, U>),
Upgrade(U::Future),
None,
}
struct InnerDispatcher<T, S, B, X, U>
where
S: Service<Request = Request>,
S::Error: Into<Error>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
service: CloneableService<S>,
expect: CloneableService<X>,
upgrade: Option<CloneableService<U>>,
on_connect: Option<Box<dyn DataFactory>>,
flags: Flags,
peer_addr: Option<net::SocketAddr>,
error: Option<DispatchError>,
state: State<S, B, X>,
payload: Option<PayloadSender>,
messages: VecDeque<DispatcherMessage>,
ka_expire: Instant,
ka_timer: Option<Delay>,
io: T,
read_buf: BytesMut,
write_buf: BytesMut,
codec: Codec,
}
enum DispatcherMessage {
Item(Request),
Upgrade(Request),
Error(Response<()>),
}
enum State<S, B, X>
where
S: Service<Request = Request>,
X: Service<Request = Request, Response = Request>,
B: MessageBody,
{
None,
ExpectCall(X::Future),
ServiceCall(S::Future),
SendPayload(ResponseBody<B>),
}
impl<S, B, X> State<S, B, X>
where
S: Service<Request = Request>,
X: Service<Request = Request, Response = Request>,
B: MessageBody,
{
fn is_empty(&self) -> bool {
if let State::None = self {
true
} else {
false
}
}
fn is_call(&self) -> bool {
if let State::ServiceCall(_) = self {
true
} else {
false
}
}
}
enum PollResponse {
Upgrade(Request),
DoNothing,
DrainWriteBuf,
}
impl PartialEq for PollResponse {
fn eq(&self, other: &PollResponse) -> bool {
match self {
PollResponse::DrainWriteBuf => match other {
PollResponse::DrainWriteBuf => true,
_ => false,
},
PollResponse::DoNothing => match other {
PollResponse::DoNothing => true,
_ => false,
},
_ => false,
}
}
}
impl<T, S, B, X, U> Dispatcher<T, S, B, X, U>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
/// Create http/1 dispatcher.
pub(crate) fn new(
stream: T,
config: ServiceConfig,
service: CloneableService<S>,
expect: CloneableService<X>,
upgrade: Option<CloneableService<U>>,
on_connect: Option<Box<dyn DataFactory>>,
) -> Self {
Dispatcher::with_timeout(
stream,
Codec::new(config.clone()),
config,
BytesMut::with_capacity(HW_BUFFER_SIZE),
None,
service,
expect,
upgrade,
on_connect,
)
}
/// Create http/1 dispatcher with slow request timeout.
pub(crate) fn with_timeout(
io: T,
codec: Codec,
config: ServiceConfig,
read_buf: BytesMut,
timeout: Option<Delay>,
service: CloneableService<S>,
expect: CloneableService<X>,
upgrade: Option<CloneableService<U>>,
on_connect: Option<Box<dyn DataFactory>>,
) -> Self {
let keepalive = config.keep_alive_enabled();
let flags = if keepalive {
Flags::KEEPALIVE
} else {
Flags::empty()
};
// keep-alive timer
let (ka_expire, ka_timer) = if let Some(delay) = timeout {
(delay.deadline(), Some(delay))
} else if let Some(delay) = config.keep_alive_timer() {
(delay.deadline(), Some(delay))
} else {
(config.now(), None)
};
Dispatcher {
inner: DispatcherState::Normal(InnerDispatcher {
write_buf: BytesMut::with_capacity(HW_BUFFER_SIZE),
payload: None,
state: State::None,
error: None,
peer_addr: io.peer_addr(),
messages: VecDeque::new(),
io,
codec,
read_buf,
service,
expect,
upgrade,
on_connect,
flags,
ka_expire,
ka_timer,
}),
}
}
}
impl<T, S, B, X, U> InnerDispatcher<T, S, B, X, U>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
fn can_read(&self, cx: &mut Context) -> bool {
if self
.flags
.intersects(Flags::READ_DISCONNECT | Flags::UPGRADE)
{
false
} else if let Some(ref info) = self.payload {
info.need_read(cx) == PayloadStatus::Read
} else {
true
}
}
// client disconnected: mark both halves as disconnected and fail any in-flight payload
fn client_disconnected(&mut self) {
self.flags
.insert(Flags::READ_DISCONNECT | Flags::WRITE_DISCONNECT);
if let Some(mut payload) = self.payload.take() {
payload.set_error(PayloadError::Incomplete(None));
}
}
/// Flush the write buffer to the stream.
///
/// Returns `true` if the write would block (some data remains buffered),
/// `false` if the buffer was fully flushed.
fn poll_flush(&mut self, cx: &mut Context) -> Result<bool, DispatchError> {
if self.write_buf.is_empty() {
return Ok(false);
}
let len = self.write_buf.len();
let mut written = 0;
while written < len {
match unsafe { Pin::new_unchecked(&mut self.io) }
.poll_write(cx, &self.write_buf[written..])
{
Poll::Ready(Ok(0)) => {
return Err(DispatchError::Io(io::Error::new(
io::ErrorKind::WriteZero,
"",
)));
}
Poll::Ready(Ok(n)) => {
written += n;
}
Poll::Pending => {
if written > 0 {
let _ = self.write_buf.split_to(written);
}
return Ok(true);
}
Poll::Ready(Err(err)) => return Err(DispatchError::Io(err)),
}
}
if written == self.write_buf.len() {
unsafe { self.write_buf.set_len(0) }
} else {
let _ = self.write_buf.split_to(written);
}
Ok(false)
}
fn send_response(
&mut self,
message: Response<()>,
body: ResponseBody<B>,
) -> Result<State<S, B, X>, DispatchError> {
self.codec
.encode(Message::Item((message, body.size())), &mut self.write_buf)
.map_err(|err| {
if let Some(mut payload) = self.payload.take() {
payload.set_error(PayloadError::Incomplete(None));
}
DispatchError::Io(err)
})?;
self.flags.set(Flags::KEEPALIVE, self.codec.keepalive());
match body.size() {
BodySize::None | BodySize::Empty => Ok(State::None),
_ => Ok(State::SendPayload(body)),
}
}
fn send_continue(&mut self) {
self.write_buf
.extend_from_slice(b"HTTP/1.1 100 Continue\r\n\r\n");
}
fn poll_response(
&mut self,
cx: &mut Context,
) -> Result<PollResponse, DispatchError> {
loop {
let state = match self.state {
State::None => match self.messages.pop_front() {
Some(DispatcherMessage::Item(req)) => {
Some(self.handle_request(req, cx)?)
}
Some(DispatcherMessage::Error(res)) => {
Some(self.send_response(res, ResponseBody::Other(Body::Empty))?)
}
Some(DispatcherMessage::Upgrade(req)) => {
return Ok(PollResponse::Upgrade(req));
}
None => None,
},
State::ExpectCall(ref mut fut) => {
match unsafe { Pin::new_unchecked(fut) }.poll(cx) {
Poll::Ready(Ok(req)) => {
self.send_continue();
self.state = State::ServiceCall(self.service.call(req));
continue;
}
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
Some(self.send_response(res, body.into_body())?)
}
Poll::Pending => None,
}
}
State::ServiceCall(ref mut fut) => {
match unsafe { Pin::new_unchecked(fut) }.poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
self.state = self.send_response(res, body)?;
continue;
}
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
Some(self.send_response(res, body.into_body())?)
}
Poll::Pending => None,
}
}
State::SendPayload(ref mut stream) => {
loop {
if self.write_buf.len() < HW_BUFFER_SIZE {
match stream.poll_next(cx) {
Poll::Ready(Some(Ok(item))) => {
self.codec.encode(
Message::Chunk(Some(item)),
&mut self.write_buf,
)?;
continue;
}
Poll::Ready(None) => {
self.codec.encode(
Message::Chunk(None),
&mut self.write_buf,
)?;
self.state = State::None;
}
Poll::Ready(Some(Err(_))) => {
return Err(DispatchError::Unknown)
}
Poll::Pending => return Ok(PollResponse::DoNothing),
}
} else {
return Ok(PollResponse::DrainWriteBuf);
}
break;
}
continue;
}
};
// set new state
if let Some(state) = state {
self.state = state;
if !self.state.is_empty() {
continue;
}
} else {
// if read-backpressure is enabled and we consumed some data.
// we may read more data and retry
if self.state.is_call() {
if self.poll_request(cx)? {
continue;
}
} else if !self.messages.is_empty() {
continue;
}
}
break;
}
Ok(PollResponse::DoNothing)
}
fn handle_request(
&mut self,
req: Request,
cx: &mut Context,
) -> Result<State<S, B, X>, DispatchError> {
// Handle `EXPECT: 100-Continue` header
let req = if req.head().expect() {
let mut task = self.expect.call(req);
match unsafe { Pin::new_unchecked(&mut task) }.poll(cx) {
Poll::Ready(Ok(req)) => {
self.send_continue();
req
}
Poll::Pending => return Ok(State::ExpectCall(task)),
Poll::Ready(Err(e)) => {
let e = e.into();
let res: Response = e.into();
let (res, body) = res.replace_body(());
return self.send_response(res, body.into_body());
}
}
} else {
req
};
// Call service
let mut task = self.service.call(req);
match unsafe { Pin::new_unchecked(&mut task) }.poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
self.send_response(res, body)
}
Poll::Pending => Ok(State::ServiceCall(task)),
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
self.send_response(res, body.into_body())
}
}
}
/// Process incoming requests
pub(self) fn poll_request(
&mut self,
cx: &mut Context,
) -> Result<bool, DispatchError> {
// limit the number of unprocessed (pipelined) requests
if self.messages.len() >= MAX_PIPELINED_MESSAGES || !self.can_read(cx) {
return Ok(false);
}
let mut updated = false;
loop {
match self.codec.decode(&mut self.read_buf) {
Ok(Some(msg)) => {
updated = true;
self.flags.insert(Flags::STARTED);
match msg {
Message::Item(mut req) => {
let pl = self.codec.message_type();
req.head_mut().peer_addr = self.peer_addr;
// set on_connect data
if let Some(ref on_connect) = self.on_connect {
on_connect.set(&mut req.extensions_mut());
}
if pl == MessageType::Stream && self.upgrade.is_some() {
self.messages.push_back(DispatcherMessage::Upgrade(req));
break;
}
if pl == MessageType::Payload || pl == MessageType::Stream {
let (ps, pl) = Payload::create(false);
let (req1, _) =
req.replace_payload(crate::Payload::H1(pl));
req = req1;
self.payload = Some(ps);
}
// handle request early
if self.state.is_empty() {
self.state = self.handle_request(req, cx)?;
} else {
self.messages.push_back(DispatcherMessage::Item(req));
}
}
Message::Chunk(Some(chunk)) => {
if let Some(ref mut payload) = self.payload {
payload.feed_data(chunk);
} else {
error!(
"Internal server error: unexpected payload chunk"
);
self.flags.insert(Flags::READ_DISCONNECT);
self.messages.push_back(DispatcherMessage::Error(
Response::InternalServerError().finish().drop_body(),
));
self.error = Some(DispatchError::InternalError);
break;
}
}
Message::Chunk(None) => {
if let Some(mut payload) = self.payload.take() {
payload.feed_eof();
} else {
error!("Internal server error: unexpected eof");
self.flags.insert(Flags::READ_DISCONNECT);
self.messages.push_back(DispatcherMessage::Error(
Response::InternalServerError().finish().drop_body(),
));
self.error = Some(DispatchError::InternalError);
break;
}
}
}
}
Ok(None) => break,
Err(ParseError::Io(e)) => {
self.client_disconnected();
self.error = Some(DispatchError::Io(e));
break;
}
Err(e) => {
if let Some(mut payload) = self.payload.take() {
payload.set_error(PayloadError::EncodingCorrupted);
}
// Malformed requests are answered with a 400 response
self.messages.push_back(DispatcherMessage::Error(
Response::BadRequest().finish().drop_body(),
));
self.flags.insert(Flags::READ_DISCONNECT);
self.error = Some(e.into());
break;
}
}
}
if updated && self.ka_timer.is_some() {
if let Some(expire) = self.codec.config().keep_alive_expire() {
self.ka_expire = expire;
}
}
Ok(updated)
}
/// Handle keep-alive and shutdown timers
fn poll_keepalive(&mut self, cx: &mut Context) -> Result<(), DispatchError> {
if self.ka_timer.is_none() {
// shutdown timeout
if self.flags.contains(Flags::SHUTDOWN) {
if let Some(interval) = self.codec.config().client_disconnect_timer() {
self.ka_timer = Some(delay(interval));
} else {
self.flags.insert(Flags::READ_DISCONNECT);
if let Some(mut payload) = self.payload.take() {
payload.set_error(PayloadError::Incomplete(None));
}
return Ok(());
}
} else {
return Ok(());
}
}
match Pin::new(&mut self.ka_timer.as_mut().unwrap()).poll(cx) {
Poll::Ready(()) => {
// if we get timeout during shutdown, drop connection
if self.flags.contains(Flags::SHUTDOWN) {
return Err(DispatchError::DisconnectTimeout);
} else if self.ka_timer.as_mut().unwrap().deadline() >= self.ka_expire {
// check for any outstanding tasks
if self.state.is_empty() && self.write_buf.is_empty() {
if self.flags.contains(Flags::STARTED) {
trace!("Keep-alive timeout, close connection");
self.flags.insert(Flags::SHUTDOWN);
// start shutdown timer
if let Some(deadline) =
self.codec.config().client_disconnect_timer()
{
if let Some(mut timer) = self.ka_timer.as_mut() {
timer.reset(deadline);
let _ = Pin::new(&mut timer).poll(cx);
}
} else {
// no shutdown timeout, drop socket
self.flags.insert(Flags::WRITE_DISCONNECT);
return Ok(());
}
} else {
// timeout on first request (slow request) return 408
if !self.flags.contains(Flags::STARTED) {
trace!("Slow request timeout");
let _ = self.send_response(
Response::RequestTimeout().finish().drop_body(),
ResponseBody::Other(Body::Empty),
);
} else {
trace!("Keep-alive connection timeout");
}
self.flags.insert(Flags::STARTED | Flags::SHUTDOWN);
self.state = State::None;
}
} else if let Some(deadline) =
self.codec.config().keep_alive_expire()
{
if let Some(mut timer) = self.ka_timer.as_mut() {
timer.reset(deadline);
let _ = Pin::new(&mut timer).poll(cx);
}
}
} else if let Some(mut timer) = self.ka_timer.as_mut() {
timer.reset(self.ka_expire);
let _ = Pin::new(&mut timer).poll(cx);
}
}
Poll::Pending => (),
}
Ok(())
}
}
impl<T, S, B, X, U> Unpin for Dispatcher<T, S, B, X, U>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
}
impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
type Output = Result<(), DispatchError>;
#[inline]
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
match self.as_mut().inner {
DispatcherState::Normal(ref mut inner) => {
inner.poll_keepalive(cx)?;
if inner.flags.contains(Flags::SHUTDOWN) {
if inner.flags.contains(Flags::WRITE_DISCONNECT) {
Poll::Ready(Ok(()))
} else {
// flush buffer
inner.poll_flush(cx)?;
if !inner.write_buf.is_empty() {
Poll::Pending
} else {
match Pin::new(&mut inner.io).poll_shutdown(cx) {
Poll::Ready(res) => {
Poll::Ready(res.map_err(DispatchError::from))
}
Poll::Pending => Poll::Pending,
}
}
}
} else {
// read socket into a buf
let should_disconnect =
if !inner.flags.contains(Flags::READ_DISCONNECT) {
read_available(cx, &mut inner.io, &mut inner.read_buf)?
} else {
None
};
inner.poll_request(cx)?;
if let Some(true) = should_disconnect {
inner.flags.insert(Flags::READ_DISCONNECT);
if let Some(mut payload) = inner.payload.take() {
payload.feed_eof();
}
};
loop {
if inner.write_buf.remaining_mut() < LW_BUFFER_SIZE {
inner.write_buf.reserve(HW_BUFFER_SIZE);
}
let result = inner.poll_response(cx)?;
let drain = result == PollResponse::DrainWriteBuf;
// switch to upgrade handler
if let PollResponse::Upgrade(req) = result {
if let DispatcherState::Normal(inner) =
std::mem::replace(&mut self.inner, DispatcherState::None)
{
let mut parts = FramedParts::with_read_buf(
inner.io,
inner.codec,
inner.read_buf,
);
parts.write_buf = inner.write_buf;
let framed = Framed::from_parts(parts);
self.inner = DispatcherState::Upgrade(
inner.upgrade.unwrap().call((req, framed)),
);
return self.poll(cx);
} else {
panic!()
}
}
// we didn't get WouldBlock from the write operation,
// so the data was written to the kernel completely (OSX)
// and we have to write again, otherwise the response can get stuck
if inner.poll_flush(cx)? || !drain {
break;
}
}
// client is gone
if inner.flags.contains(Flags::WRITE_DISCONNECT) {
return Poll::Ready(Ok(()));
}
let is_empty = inner.state.is_empty();
// read half is closed and we are not processing any responses
if inner.flags.contains(Flags::READ_DISCONNECT) && is_empty {
inner.flags.insert(Flags::SHUTDOWN);
}
// keep-alive and stream errors
if is_empty && inner.write_buf.is_empty() {
if let Some(err) = inner.error.take() {
Poll::Ready(Err(err))
}
// disconnect if keep-alive is not enabled
else if inner.flags.contains(Flags::STARTED)
&& !inner.flags.intersects(Flags::KEEPALIVE)
{
inner.flags.insert(Flags::SHUTDOWN);
self.poll(cx)
}
// disconnect if shutdown
else if inner.flags.contains(Flags::SHUTDOWN) {
self.poll(cx)
} else {
Poll::Pending
}
} else {
Poll::Pending
}
}
}
DispatcherState::Upgrade(ref mut fut) => {
unsafe { Pin::new_unchecked(fut) }.poll(cx).map_err(|e| {
error!("Upgrade handler error: {}", e);
DispatchError::Upgrade
})
}
DispatcherState::None => panic!(),
}
}
}
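/// Read as much as possible from `io` into `buf`.
///
/// Returns `Ok(None)` if nothing was read and the socket is not ready,
/// `Ok(Some(false))` if some data was read but the socket is not ready for
/// more, and `Ok(Some(true))` if the peer closed the read half (a 0-byte
/// read, or a connection reset after some data was received).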
fn read_available<T>(
cx: &mut Context,
io: &mut T,
buf: &mut BytesMut,
) -> Result<Option<bool>, io::Error>
where
T: AsyncRead + Unpin,
{
let mut read_some = false;
loop {
if buf.remaining_mut() < LW_BUFFER_SIZE {
buf.reserve(HW_BUFFER_SIZE);
}
match read(cx, io, buf) {
Poll::Pending => {
return if read_some { Ok(Some(false)) } else { Ok(None) };
}
Poll::Ready(Ok(n)) => {
if n == 0 {
return Ok(Some(true));
} else {
read_some = true;
}
}
Poll::Ready(Err(e)) => {
return if e.kind() == io::ErrorKind::WouldBlock {
if read_some {
Ok(Some(false))
} else {
Ok(None)
}
} else if e.kind() == io::ErrorKind::ConnectionReset && read_some {
Ok(Some(true))
} else {
Err(e)
}
}
}
}
}
fn read<T>(
cx: &mut Context,
io: &mut T,
buf: &mut BytesMut,
) -> Poll<Result<usize, io::Error>>
where
T: AsyncRead + Unpin,
{
Pin::new(io).poll_read_buf(cx, buf)
}
#[cfg(test)]
mod tests {
use actix_service::IntoService;
use futures::future::{lazy, ok};
use super::*;
use crate::error::Error;
use crate::h1::{ExpectHandler, UpgradeHandler};
use crate::test::TestBuffer;
#[actix_rt::test]
async fn test_req_parse_err() {
lazy(|cx| {
let buf = TestBuffer::new("GET /test HTTP/1\r\n\r\n");
let mut h1 = Dispatcher::<_, _, _, _, UpgradeHandler<TestBuffer>>::new(
buf,
ServiceConfig::default(),
CloneableService::new(
(|_| ok::<_, Error>(Response::Ok().finish())).into_service(),
),
CloneableService::new(ExpectHandler),
None,
None,
);
match Pin::new(&mut h1).poll(cx) {
Poll::Pending => panic!(),
Poll::Ready(res) => assert!(res.is_err()),
}
if let DispatcherState::Normal(ref inner) = h1.inner {
assert!(inner.flags.contains(Flags::READ_DISCONNECT));
assert_eq!(&inner.io.write_buf[..26], b"HTTP/1.1 400 Bad Request\r\n");
}
})
.await;
}
}


@ -0,0 +1,636 @@
#![allow(unused_imports, unused_variables, dead_code)]
use std::fmt::Write as FmtWrite;
use std::io::Write;
use std::marker::PhantomData;
use std::rc::Rc;
use std::str::FromStr;
use std::{cmp, fmt, io, mem};
use bytes::{BufMut, Bytes, BytesMut};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::header::{map, ContentEncoding};
use crate::helpers;
use crate::http::header::{
HeaderValue, ACCEPT_ENCODING, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
};
use crate::http::{HeaderMap, Method, StatusCode, Version};
use crate::message::{ConnectionType, Head, RequestHead, RequestHeadType, ResponseHead};
use crate::request::Request;
use crate::response::Response;
const AVERAGE_HEADER_SIZE: usize = 30;
#[derive(Debug)]
pub(crate) struct MessageEncoder<T: MessageType> {
pub length: BodySize,
pub te: TransferEncoding,
_t: PhantomData<T>,
}
impl<T: MessageType> Default for MessageEncoder<T> {
fn default() -> Self {
MessageEncoder {
length: BodySize::None,
te: TransferEncoding::empty(),
_t: PhantomData,
}
}
}
pub(crate) trait MessageType: Sized {
fn status(&self) -> Option<StatusCode>;
fn headers(&self) -> &HeaderMap;
fn extra_headers(&self) -> Option<&HeaderMap>;
fn camel_case(&self) -> bool {
false
}
fn chunked(&self) -> bool;
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()>;
fn encode_headers(
&mut self,
dst: &mut BytesMut,
version: Version,
mut length: BodySize,
ctype: ConnectionType,
config: &ServiceConfig,
) -> io::Result<()> {
let chunked = self.chunked();
let mut skip_len = length != BodySize::Stream;
let camel_case = self.camel_case();
// Content length
if let Some(status) = self.status() {
match status {
StatusCode::NO_CONTENT
| StatusCode::CONTINUE
| StatusCode::PROCESSING => length = BodySize::None,
StatusCode::SWITCHING_PROTOCOLS => {
skip_len = true;
length = BodySize::Stream;
}
_ => (),
}
}
match length {
BodySize::Stream => {
if chunked {
if camel_case {
dst.put_slice(b"\r\nTransfer-Encoding: chunked\r\n")
} else {
dst.put_slice(b"\r\ntransfer-encoding: chunked\r\n")
}
} else {
skip_len = false;
dst.put_slice(b"\r\n");
}
}
BodySize::Empty => {
if camel_case {
dst.put_slice(b"\r\nContent-Length: 0\r\n");
} else {
dst.put_slice(b"\r\ncontent-length: 0\r\n");
}
}
BodySize::Sized(len) => helpers::write_content_length(len, dst),
BodySize::Sized64(len) => {
if camel_case {
dst.put_slice(b"\r\nContent-Length: ");
} else {
dst.put_slice(b"\r\ncontent-length: ");
}
write!(dst.writer(), "{}\r\n", len)?;
}
BodySize::None => dst.put_slice(b"\r\n"),
}
// Connection
match ctype {
ConnectionType::Upgrade => dst.put_slice(b"connection: upgrade\r\n"),
ConnectionType::KeepAlive if version < Version::HTTP_11 => {
if camel_case {
dst.put_slice(b"Connection: keep-alive\r\n")
} else {
dst.put_slice(b"connection: keep-alive\r\n")
}
}
ConnectionType::Close if version >= Version::HTTP_11 => {
if camel_case {
dst.put_slice(b"Connection: close\r\n")
} else {
dst.put_slice(b"connection: close\r\n")
}
}
_ => (),
}
// merging headers from head and extra headers. HeaderMap::new() does not allocate.
let empty_headers = HeaderMap::new();
let extra_headers = self.extra_headers().unwrap_or(&empty_headers);
let headers = self
.headers()
.inner
.iter()
.filter(|(name, _)| !extra_headers.contains_key(*name))
.chain(extra_headers.inner.iter());
// write headers
let mut pos = 0;
let mut has_date = false;
let mut remaining = dst.remaining_mut();
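// Write headers directly into the spare capacity of `dst`: `buf` is a raw
// view over the uninitialized tail, `pos` counts the bytes written, and the
// `advance_mut` calls below commit them. The pointer cast sidesteps the
// borrow checker so `dst` can still be reserved and advanced inside the loop.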
let mut buf = unsafe { &mut *(dst.bytes_mut() as *mut [u8]) };
for (key, value) in headers {
match *key {
CONNECTION => continue,
TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => continue,
DATE => {
has_date = true;
}
_ => (),
}
let k = key.as_str().as_bytes();
match value {
map::Value::One(ref val) => {
let v = val.as_ref();
let len = k.len() + v.len() + 4;
if len > remaining {
unsafe {
dst.advance_mut(pos);
}
pos = 0;
dst.reserve(len * 2);
remaining = dst.remaining_mut();
unsafe {
buf = &mut *(dst.bytes_mut() as *mut _);
}
}
// use upper Camel-Case
if camel_case {
write_camel_case(k, &mut buf[pos..pos + k.len()]);
} else {
buf[pos..pos + k.len()].copy_from_slice(k);
}
pos += k.len();
buf[pos..pos + 2].copy_from_slice(b": ");
pos += 2;
buf[pos..pos + v.len()].copy_from_slice(v);
pos += v.len();
buf[pos..pos + 2].copy_from_slice(b"\r\n");
pos += 2;
remaining -= len;
}
map::Value::Multi(ref vec) => {
for val in vec {
let v = val.as_ref();
let len = k.len() + v.len() + 4;
if len > remaining {
unsafe {
dst.advance_mut(pos);
}
pos = 0;
dst.reserve(len * 2);
remaining = dst.remaining_mut();
unsafe {
buf = &mut *(dst.bytes_mut() as *mut _);
}
}
// use upper Camel-Case
if camel_case {
write_camel_case(k, &mut buf[pos..pos + k.len()]);
} else {
buf[pos..pos + k.len()].copy_from_slice(k);
}
pos += k.len();
buf[pos..pos + 2].copy_from_slice(b": ");
pos += 2;
buf[pos..pos + v.len()].copy_from_slice(v);
pos += v.len();
buf[pos..pos + 2].copy_from_slice(b"\r\n");
pos += 2;
remaining -= len;
}
}
}
}
unsafe {
dst.advance_mut(pos);
}
// optimized date header, set_date writes \r\n
if !has_date {
config.set_date(dst);
} else {
// msg eof
dst.extend_from_slice(b"\r\n");
}
Ok(())
}
}
impl MessageType for Response<()> {
fn status(&self) -> Option<StatusCode> {
Some(self.head().status)
}
fn chunked(&self) -> bool {
self.head().chunked()
}
fn headers(&self) -> &HeaderMap {
&self.head().headers
}
fn extra_headers(&self) -> Option<&HeaderMap> {
None
}
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> {
let head = self.head();
let reason = head.reason().as_bytes();
dst.reserve(256 + head.headers.len() * AVERAGE_HEADER_SIZE + reason.len());
// status line
helpers::write_status_line(head.version, head.status.as_u16(), dst);
dst.put_slice(reason);
Ok(())
}
}
impl MessageType for RequestHeadType {
fn status(&self) -> Option<StatusCode> {
None
}
fn chunked(&self) -> bool {
self.as_ref().chunked()
}
fn camel_case(&self) -> bool {
self.as_ref().camel_case_headers()
}
fn headers(&self) -> &HeaderMap {
self.as_ref().headers()
}
fn extra_headers(&self) -> Option<&HeaderMap> {
self.extra_headers()
}
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> {
let head = self.as_ref();
dst.reserve(256 + head.headers.len() * AVERAGE_HEADER_SIZE);
write!(
Writer(dst),
"{} {} {}",
head.method,
head.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"),
match head.version {
Version::HTTP_09 => "HTTP/0.9",
Version::HTTP_10 => "HTTP/1.0",
Version::HTTP_11 => "HTTP/1.1",
Version::HTTP_2 => "HTTP/2.0",
}
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
}
}
impl<T: MessageType> MessageEncoder<T> {
/// Encode message
pub fn encode_chunk(&mut self, msg: &[u8], buf: &mut BytesMut) -> io::Result<bool> {
self.te.encode(msg, buf)
}
/// Encode eof
pub fn encode_eof(&mut self, buf: &mut BytesMut) -> io::Result<()> {
self.te.encode_eof(buf)
}
pub fn encode(
&mut self,
dst: &mut BytesMut,
message: &mut T,
head: bool,
stream: bool,
version: Version,
length: BodySize,
ctype: ConnectionType,
config: &ServiceConfig,
) -> io::Result<()> {
// transfer encoding
if !head {
self.te = match length {
BodySize::Empty => TransferEncoding::empty(),
BodySize::Sized(len) => TransferEncoding::length(len as u64),
BodySize::Sized64(len) => TransferEncoding::length(len),
BodySize::Stream => {
if message.chunked() && !stream {
TransferEncoding::chunked()
} else {
TransferEncoding::eof()
}
}
BodySize::None => TransferEncoding::empty(),
};
} else {
self.te = TransferEncoding::empty();
}
message.encode_status(dst)?;
message.encode_headers(dst, version, length, ctype, config)
}
}
/// Encoders to handle different Transfer-Encodings.
#[derive(Debug)]
pub(crate) struct TransferEncoding {
kind: TransferEncodingKind,
}
#[derive(Debug, PartialEq, Clone)]
enum TransferEncodingKind {
/// An Encoder for when Transfer-Encoding includes `chunked`.
Chunked(bool),
/// An Encoder for when Content-Length is set.
///
/// Enforces that the body is not longer than the Content-Length header.
Length(u64),
/// An Encoder for when Content-Length is not known.
///
/// Application decides when to stop writing.
Eof,
}
impl TransferEncoding {
#[inline]
pub fn empty() -> TransferEncoding {
TransferEncoding {
kind: TransferEncodingKind::Length(0),
}
}
#[inline]
pub fn eof() -> TransferEncoding {
TransferEncoding {
kind: TransferEncodingKind::Eof,
}
}
#[inline]
pub fn chunked() -> TransferEncoding {
TransferEncoding {
kind: TransferEncodingKind::Chunked(false),
}
}
#[inline]
pub fn length(len: u64) -> TransferEncoding {
TransferEncoding {
kind: TransferEncodingKind::Length(len),
}
}
/// Encode message. Return `EOF` state of encoder
#[inline]
pub fn encode(&mut self, msg: &[u8], buf: &mut BytesMut) -> io::Result<bool> {
match self.kind {
TransferEncodingKind::Eof => {
let eof = msg.is_empty();
buf.extend_from_slice(msg);
Ok(eof)
}
TransferEncodingKind::Chunked(ref mut eof) => {
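// Chunked framing: every chunk is written as "{len:X}\r\n{data}\r\n";
// an empty `msg` emits the terminating "0\r\n\r\n" and marks eof.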
if *eof {
return Ok(true);
}
if msg.is_empty() {
*eof = true;
buf.extend_from_slice(b"0\r\n\r\n");
} else {
writeln!(Writer(buf), "{:X}\r", msg.len())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
buf.reserve(msg.len() + 2);
buf.extend_from_slice(msg);
buf.extend_from_slice(b"\r\n");
}
Ok(*eof)
}
TransferEncodingKind::Length(ref mut remaining) => {
if *remaining > 0 {
if msg.is_empty() {
return Ok(*remaining == 0);
}
let len = cmp::min(*remaining, msg.len() as u64);
buf.extend_from_slice(&msg[..len as usize]);
*remaining -= len as u64;
Ok(*remaining == 0)
} else {
Ok(true)
}
}
}
}
/// Encode eof. Return `EOF` state of encoder
#[inline]
pub fn encode_eof(&mut self, buf: &mut BytesMut) -> io::Result<()> {
match self.kind {
TransferEncodingKind::Eof => Ok(()),
TransferEncodingKind::Length(rem) => {
if rem != 0 {
Err(io::Error::new(io::ErrorKind::UnexpectedEof, ""))
} else {
Ok(())
}
}
TransferEncodingKind::Chunked(ref mut eof) => {
if !*eof {
*eof = true;
buf.extend_from_slice(b"0\r\n\r\n");
}
Ok(())
}
}
}
}
struct Writer<'a>(pub &'a mut BytesMut);
impl<'a> io::Write for Writer<'a> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
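/// Copy a lowercase header name such as `content-length` into `buffer` as
/// `Content-Length`; `byte ^ b' '` flips the ASCII case bit of a letter.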
fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
let mut index = 0;
let key = value;
let mut key_iter = key.iter();
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
} else {
return;
}
while let Some(c) = key_iter.next() {
buffer[index] = *c;
index += 1;
if *c == b'-' {
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
}
}
}
}
#[cfg(test)]
mod tests {
use bytes::Bytes;
//use std::rc::Rc;
use super::*;
use crate::http::header::{HeaderValue, CONTENT_TYPE};
use http::header::AUTHORIZATION;
#[test]
fn test_chunked_te() {
let mut bytes = BytesMut::new();
let mut enc = TransferEncoding::chunked();
{
assert!(!enc.encode(b"test", &mut bytes).ok().unwrap());
assert!(enc.encode(b"", &mut bytes).ok().unwrap());
}
assert_eq!(
bytes.take().freeze(),
Bytes::from_static(b"4\r\ntest\r\n0\r\n\r\n")
);
}
#[test]
fn test_camel_case() {
let mut bytes = BytesMut::with_capacity(2048);
let mut head = RequestHead::default();
head.set_camel_case_headers(true);
head.headers.insert(DATE, HeaderValue::from_static("date"));
head.headers
.insert(CONTENT_TYPE, HeaderValue::from_static("plain/text"));
let mut head = RequestHeadType::Owned(head);
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Empty,
ConnectionType::Close,
&ServiceConfig::default(),
);
let data = String::from_utf8(Vec::from(bytes.take().freeze().as_ref())).unwrap();
assert!(data.contains("Content-Length: 0\r\n"));
assert!(data.contains("Connection: close\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n"));
assert!(data.contains("Date: date\r\n"));
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Stream,
ConnectionType::KeepAlive,
&ServiceConfig::default(),
);
let data = String::from_utf8(Vec::from(bytes.take().freeze().as_ref())).unwrap();
assert!(data.contains("Transfer-Encoding: chunked\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n"));
assert!(data.contains("Date: date\r\n"));
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Sized64(100),
ConnectionType::KeepAlive,
&ServiceConfig::default(),
);
let data = String::from_utf8(Vec::from(bytes.take().freeze().as_ref())).unwrap();
assert!(data.contains("Content-Length: 100\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n"));
assert!(data.contains("Date: date\r\n"));
let mut head = RequestHead::default();
head.set_camel_case_headers(false);
head.headers.insert(DATE, HeaderValue::from_static("date"));
head.headers
.insert(CONTENT_TYPE, HeaderValue::from_static("plain/text"));
head.headers
.append(CONTENT_TYPE, HeaderValue::from_static("xml"));
let mut head = RequestHeadType::Owned(head);
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Stream,
ConnectionType::KeepAlive,
&ServiceConfig::default(),
);
let data = String::from_utf8(Vec::from(bytes.take().freeze().as_ref())).unwrap();
assert!(data.contains("transfer-encoding: chunked\r\n"));
assert!(data.contains("content-type: xml\r\n"));
assert!(data.contains("content-type: plain/text\r\n"));
assert!(data.contains("date: date\r\n"));
}
#[test]
fn test_extra_headers() {
let mut bytes = BytesMut::with_capacity(2048);
let mut head = RequestHead::default();
head.headers.insert(
AUTHORIZATION,
HeaderValue::from_static("some authorization"),
);
let mut extra_headers = HeaderMap::new();
extra_headers.insert(
AUTHORIZATION,
HeaderValue::from_static("another authorization"),
);
extra_headers.insert(DATE, HeaderValue::from_static("date"));
let mut head = RequestHeadType::Rc(Rc::new(head), Some(extra_headers));
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Empty,
ConnectionType::Close,
&ServiceConfig::default(),
);
let data = String::from_utf8(Vec::from(bytes.take().freeze().as_ref())).unwrap();
assert!(data.contains("content-length: 0\r\n"));
assert!(data.contains("connection: close\r\n"));
assert!(data.contains("authorization: another authorization\r\n"));
assert!(data.contains("date: date\r\n"));
}
}


@ -0,0 +1,39 @@
use std::task::{Context, Poll};
use actix_server_config::ServerConfig;
use actix_service::{Service, ServiceFactory};
use futures::future::{ok, Ready};
use crate::error::Error;
use crate::request::Request;
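/// Default `Expect: 100-continue` handler.
///
/// It simply passes the request through; the dispatcher then writes the
/// interim `HTTP/1.1 100 Continue` response before reading the body.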
pub struct ExpectHandler;
impl ServiceFactory for ExpectHandler {
type Config = ServerConfig;
type Request = Request;
type Response = Request;
type Error = Error;
type Service = ExpectHandler;
type InitError = Error;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: &ServerConfig) -> Self::Future {
ok(ExpectHandler)
}
}
impl Service for ExpectHandler {
type Request = Request;
type Response = Request;
type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Request) -> Self::Future {
ok(req)
}
}

85 actix-http/src/h1/mod.rs Normal file

@ -0,0 +1,85 @@
//! HTTP/1 implementation
use bytes::{Bytes, BytesMut};
mod client;
mod codec;
mod decoder;
mod dispatcher;
mod encoder;
mod expect;
mod payload;
mod service;
mod upgrade;
mod utils;
pub use self::client::{ClientCodec, ClientPayloadCodec};
pub use self::codec::Codec;
pub use self::dispatcher::Dispatcher;
pub use self::expect::ExpectHandler;
pub use self::payload::Payload;
pub use self::service::{H1Service, H1ServiceHandler, OneRequest};
pub use self::upgrade::UpgradeHandler;
pub use self::utils::SendResponse;
#[derive(Debug)]
/// Codec message
pub enum Message<T> {
/// Http message
Item(T),
/// Payload chunk
Chunk(Option<Bytes>),
}
impl<T> From<T> for Message<T> {
fn from(item: T) -> Self {
Message::Item(item)
}
}
/// Incoming request type
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MessageType {
None,
Payload,
Stream,
}
const LW: usize = 2 * 1024;
const HW: usize = 32 * 1024;
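// Top up the read buffer: once its remaining capacity drops below the 2 KiB
// low-water mark, reserve enough to bring it back toward the 32 KiB high-water mark.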
pub(crate) fn reserve_readbuf(src: &mut BytesMut) {
let cap = src.capacity();
if cap < LW {
src.reserve(HW - cap);
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::request::Request;
impl Message<Request> {
pub fn message(self) -> Request {
match self {
Message::Item(req) => req,
_ => panic!("error"),
}
}
pub fn chunk(self) -> Bytes {
match self {
Message::Chunk(Some(data)) => data,
_ => panic!("error"),
}
}
pub fn eof(self) -> bool {
match self {
Message::Chunk(None) => true,
Message::Chunk(Some(_)) => false,
_ => panic!("error"),
}
}
}
}


@ -0,0 +1,244 @@
//! Payload stream
use std::cell::RefCell;
use std::collections::VecDeque;
use std::pin::Pin;
use std::rc::{Rc, Weak};
use std::task::{Context, Poll};
use actix_utils::task::LocalWaker;
use bytes::Bytes;
use futures::Stream;
use crate::error::PayloadError;
/// max buffer size 32k
pub(crate) const MAX_BUFFER_SIZE: usize = 32_768;
#[derive(Debug, PartialEq)]
pub enum PayloadStatus {
Read,
Pause,
Dropped,
}
/// Buffered stream of byte chunks
///
/// Payload stores chunks in an internal deque. The next chunk can be received
/// with the `.readany()` method. The payload stream is not thread safe.
///
/// Payload stream can be used as `Response` body stream.
#[derive(Debug)]
pub struct Payload {
inner: Rc<RefCell<Inner>>,
}
impl Payload {
/// Create payload stream.
///
/// This method constructs two objects responsible for byte stream generation.
///
/// * `PayloadSender` - *Sender* side of the stream
///
/// * `Payload` - *Receiver* side of the stream
pub fn create(eof: bool) -> (PayloadSender, Payload) {
let shared = Rc::new(RefCell::new(Inner::new(eof)));
(
PayloadSender {
inner: Rc::downgrade(&shared),
},
Payload { inner: shared },
)
}
/// Create empty payload
#[doc(hidden)]
pub fn empty() -> Payload {
Payload {
inner: Rc::new(RefCell::new(Inner::new(true))),
}
}
/// Length of the data in this payload
#[cfg(test)]
pub fn len(&self) -> usize {
self.inner.borrow().len()
}
/// Is payload empty
#[cfg(test)]
pub fn is_empty(&self) -> bool {
self.inner.borrow().len() == 0
}
/// Put unused data back to payload
#[inline]
pub fn unread_data(&mut self, data: Bytes) {
self.inner.borrow_mut().unread_data(data);
}
#[inline]
pub fn readany(
&mut self,
cx: &mut Context,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
self.inner.borrow_mut().readany(cx)
}
}
impl Stream for Payload {
type Item = Result<Bytes, PayloadError>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
self.inner.borrow_mut().readany(cx)
}
}
/// Sender part of the payload stream
pub struct PayloadSender {
inner: Weak<RefCell<Inner>>,
}
impl PayloadSender {
#[inline]
pub fn set_error(&mut self, err: PayloadError) {
if let Some(shared) = self.inner.upgrade() {
shared.borrow_mut().set_error(err)
}
}
#[inline]
pub fn feed_eof(&mut self) {
if let Some(shared) = self.inner.upgrade() {
shared.borrow_mut().feed_eof()
}
}
#[inline]
pub fn feed_data(&mut self, data: Bytes) {
if let Some(shared) = self.inner.upgrade() {
shared.borrow_mut().feed_data(data)
}
}
#[inline]
pub fn need_read(&self, cx: &mut Context) -> PayloadStatus {
// we check need_read only if the Payload (receiver side) is still alive,
// otherwise report `Dropped`
if let Some(shared) = self.inner.upgrade() {
if shared.borrow().need_read {
PayloadStatus::Read
} else {
shared.borrow_mut().io_task.register(cx.waker());
PayloadStatus::Pause
}
} else {
PayloadStatus::Dropped
}
}
}
#[derive(Debug)]
struct Inner {
len: usize,
eof: bool,
err: Option<PayloadError>,
need_read: bool,
items: VecDeque<Bytes>,
task: LocalWaker,
io_task: LocalWaker,
}
impl Inner {
fn new(eof: bool) -> Self {
Inner {
eof,
len: 0,
err: None,
items: VecDeque::new(),
need_read: true,
task: LocalWaker::new(),
io_task: LocalWaker::new(),
}
}
#[inline]
fn set_error(&mut self, err: PayloadError) {
self.err = Some(err);
}
#[inline]
fn feed_eof(&mut self) {
self.eof = true;
}
#[inline]
fn feed_data(&mut self, data: Bytes) {
self.len += data.len();
self.items.push_back(data);
self.need_read = self.len < MAX_BUFFER_SIZE;
if let Some(task) = self.task.take() {
task.wake()
}
}
#[cfg(test)]
fn len(&self) -> usize {
self.len
}
fn readany(
&mut self,
cx: &mut Context,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
if let Some(data) = self.items.pop_front() {
self.len -= data.len();
self.need_read = self.len < MAX_BUFFER_SIZE;
if self.need_read && !self.eof {
self.task.register(cx.waker());
}
self.io_task.wake();
Poll::Ready(Some(Ok(data)))
} else if let Some(err) = self.err.take() {
Poll::Ready(Some(Err(err)))
} else if self.eof {
Poll::Ready(None)
} else {
self.need_read = true;
self.task.register(cx.waker());
self.io_task.wake();
Poll::Pending
}
}
fn unread_data(&mut self, data: Bytes) {
self.len += data.len();
self.items.push_front(data);
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::future::poll_fn;
#[actix_rt::test]
async fn test_unread_data() {
let (_, mut payload) = Payload::create(false);
payload.unread_data(Bytes::from("data"));
assert!(!payload.is_empty());
assert_eq!(payload.len(), 4);
assert_eq!(
Bytes::from("data"),
poll_fn(|cx| payload.readany(cx)).await.unwrap().unwrap()
);
}
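    // A minimal sketch (not in the original tests): the sender half feeds chunks
    // and the eof marker, the receiver half yields them from `readany`.
    // The test name is hypothetical.
    #[actix_rt::test]
    async fn test_sender_receiver_roundtrip_sketch() {
        let (mut sender, mut payload) = Payload::create(false);
        sender.feed_data(Bytes::from("chunk"));
        sender.feed_eof();
        assert_eq!(
            Bytes::from("chunk"),
            poll_fn(|cx| payload.readany(cx)).await.unwrap().unwrap()
        );
        // after eof the stream terminates with `None`
        assert!(poll_fn(|cx| payload.readany(cx)).await.is_none());
    }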
}


@ -0,0 +1,451 @@
use std::fmt;
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use actix_codec::Framed;
use actix_server_config::{Io, IoStream, ServerConfig as SrvConfig};
use actix_service::{IntoServiceFactory, Service, ServiceFactory};
use futures::future::{ok, Ready};
use futures::ready;
use crate::body::MessageBody;
use crate::cloneable::CloneableService;
use crate::config::{KeepAlive, ServiceConfig};
use crate::error::{DispatchError, Error, ParseError};
use crate::helpers::DataFactory;
use crate::request::Request;
use crate::response::Response;
use super::codec::Codec;
use super::dispatcher::Dispatcher;
use super::{ExpectHandler, Message, UpgradeHandler};
/// `ServiceFactory` implementation for HTTP1 transport
pub struct H1Service<T, P, S, B, X = ExpectHandler, U = UpgradeHandler<T>> {
srv: S,
cfg: ServiceConfig,
expect: X,
upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
_t: PhantomData<(T, P, B)>,
}
impl<T, P, S, B> H1Service<T, P, S, B>
where
S: ServiceFactory<Config = SrvConfig, Request = Request>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
{
/// Create a new `H1Service` instance with default config.
pub fn new<F: IntoServiceFactory<S>>(service: F) -> Self {
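// Default config (hedged reading of the three `ServiceConfig::new` arguments):
// 5s keep-alive, 5000ms client request timeout, client disconnect timer disabled.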
let cfg = ServiceConfig::new(KeepAlive::Timeout(5), 5000, 0);
H1Service {
cfg,
srv: service.into_factory(),
expect: ExpectHandler,
upgrade: None,
on_connect: None,
_t: PhantomData,
}
}
/// Create new `HttpService` instance with config.
pub fn with_config<F: IntoServiceFactory<S>>(
cfg: ServiceConfig,
service: F,
) -> Self {
H1Service {
cfg,
srv: service.into_factory(),
expect: ExpectHandler,
upgrade: None,
on_connect: None,
_t: PhantomData,
}
}
}
impl<T, P, S, B, X, U> H1Service<T, P, S, B, X, U>
where
S: ServiceFactory<Config = SrvConfig, Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
{
pub fn expect<X1>(self, expect: X1) -> H1Service<T, P, S, B, X1, U>
where
X1: ServiceFactory<Request = Request, Response = Request>,
X1::Error: Into<Error>,
X1::InitError: fmt::Debug,
{
H1Service {
expect,
cfg: self.cfg,
srv: self.srv,
upgrade: self.upgrade,
on_connect: self.on_connect,
_t: PhantomData,
}
}
pub fn upgrade<U1>(self, upgrade: Option<U1>) -> H1Service<T, P, S, B, X, U1>
where
U1: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
U1::Error: fmt::Display,
U1::InitError: fmt::Debug,
{
H1Service {
upgrade,
cfg: self.cfg,
srv: self.srv,
expect: self.expect,
on_connect: self.on_connect,
_t: PhantomData,
}
}
/// Set on connect callback.
pub(crate) fn on_connect(
mut self,
f: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
) -> Self {
self.on_connect = f;
self
}
}
impl<T, P, S, B, X, U> ServiceFactory for H1Service<T, P, S, B, X, U>
where
T: IoStream,
S: ServiceFactory<Config = SrvConfig, Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
X: ServiceFactory<Config = SrvConfig, Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: ServiceFactory<
Config = SrvConfig,
Request = (Request, Framed<T, Codec>),
Response = (),
>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
type Config = SrvConfig;
type Request = Io<T, P>;
type Response = ();
type Error = DispatchError;
type InitError = ();
type Service = H1ServiceHandler<T, P, S::Service, B, X::Service, U::Service>;
type Future = H1ServiceResponse<T, P, S, B, X, U>;
fn new_service(&self, cfg: &SrvConfig) -> Self::Future {
H1ServiceResponse {
fut: self.srv.new_service(cfg),
fut_ex: Some(self.expect.new_service(cfg)),
fut_upg: self.upgrade.as_ref().map(|f| f.new_service(cfg)),
expect: None,
upgrade: None,
on_connect: self.on_connect.clone(),
cfg: Some(self.cfg.clone()),
_t: PhantomData,
}
}
}
#[doc(hidden)]
#[pin_project::pin_project]
pub struct H1ServiceResponse<T, P, S, B, X, U>
where
S: ServiceFactory<Request = Request>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
X: ServiceFactory<Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
#[pin]
fut: S::Future,
#[pin]
fut_ex: Option<X::Future>,
#[pin]
fut_upg: Option<U::Future>,
expect: Option<X::Service>,
upgrade: Option<U::Service>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
cfg: Option<ServiceConfig>,
_t: PhantomData<(T, P, B)>,
}
impl<T, P, S, B, X, U> Future for H1ServiceResponse<T, P, S, B, X, U>
where
T: IoStream,
S: ServiceFactory<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
X: ServiceFactory<Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
type Output =
Result<H1ServiceHandler<T, P, S::Service, B, X::Service, U::Service>, ()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
if let Some(fut) = this.fut_ex.as_pin_mut() {
let expect = ready!(fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)))?;
this = self.as_mut().project();
*this.expect = Some(expect);
this.fut_ex.set(None);
}
if let Some(fut) = this.fut_upg.as_pin_mut() {
let upgrade = ready!(fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)))?;
this = self.as_mut().project();
*this.upgrade = Some(upgrade);
this.fut_upg.set(None);
}
let result = ready!(this
.fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)));
Poll::Ready(result.map(|service| {
let this = self.as_mut().project();
H1ServiceHandler::new(
this.cfg.take().unwrap(),
service,
this.expect.take().unwrap(),
this.upgrade.take(),
this.on_connect.clone(),
)
}))
}
}
/// `Service` implementation for HTTP1 transport
pub struct H1ServiceHandler<T, P, S, B, X, U> {
srv: CloneableService<S>,
expect: CloneableService<X>,
upgrade: Option<CloneableService<U>>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
cfg: ServiceConfig,
_t: PhantomData<(T, P, B)>,
}
impl<T, P, S, B, X, U> H1ServiceHandler<T, P, S, B, X, U>
where
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
fn new(
cfg: ServiceConfig,
srv: S,
expect: X,
upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
) -> H1ServiceHandler<T, P, S, B, X, U> {
H1ServiceHandler {
srv: CloneableService::new(srv),
expect: CloneableService::new(expect),
upgrade: upgrade.map(CloneableService::new),
cfg,
on_connect,
_t: PhantomData,
}
}
}
impl<T, P, S, B, X, U> Service for H1ServiceHandler<T, P, S, B, X, U>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
type Request = Io<T, P>;
type Response = ();
type Error = DispatchError;
type Future = Dispatcher<T, S, B, X, U>;
fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
let ready = self
.expect
.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready();
let ready = self
.srv
.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready()
&& ready;
if ready {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn call(&mut self, req: Self::Request) -> Self::Future {
let io = req.into_parts().0;
let on_connect = if let Some(ref on_connect) = self.on_connect {
Some(on_connect(&io))
} else {
None
};
Dispatcher::new(
io,
self.cfg.clone(),
self.srv.clone(),
self.expect.clone(),
self.upgrade.clone(),
on_connect,
)
}
}
/// `ServiceFactory` implementation for `OneRequestService` service
#[derive(Default)]
pub struct OneRequest<T, P> {
config: ServiceConfig,
_t: PhantomData<(T, P)>,
}
impl<T, P> OneRequest<T, P>
where
T: IoStream,
{
/// Create new `H1SimpleService` instance.
pub fn new() -> Self {
OneRequest {
config: ServiceConfig::default(),
_t: PhantomData,
}
}
}
impl<T, P> ServiceFactory for OneRequest<T, P>
where
T: IoStream,
{
type Config = SrvConfig;
type Request = Io<T, P>;
type Response = (Request, Framed<T, Codec>);
type Error = ParseError;
type InitError = ();
type Service = OneRequestService<T, P>;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: &SrvConfig) -> Self::Future {
ok(OneRequestService {
config: self.config.clone(),
_t: PhantomData,
})
}
}
/// `Service` implementation for HTTP1 transport. Reads one request and returns
/// request and framed object.
pub struct OneRequestService<T, P> {
config: ServiceConfig,
_t: PhantomData<(T, P)>,
}
impl<T, P> Service for OneRequestService<T, P>
where
T: IoStream,
{
type Request = Io<T, P>;
type Response = (Request, Framed<T, Codec>);
type Error = ParseError;
type Future = OneRequestServiceResponse<T>;
fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Self::Request) -> Self::Future {
OneRequestServiceResponse {
framed: Some(Framed::new(
req.into_parts().0,
Codec::new(self.config.clone()),
)),
}
}
}
#[doc(hidden)]
pub struct OneRequestServiceResponse<T>
where
T: IoStream,
{
framed: Option<Framed<T, Codec>>,
}
impl<T> Future for OneRequestServiceResponse<T>
where
T: IoStream,
{
type Output = Result<(Request, Framed<T, Codec>), ParseError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
match self.framed.as_mut().unwrap().next_item(cx) {
Poll::Ready(Some(Ok(req))) => match req {
Message::Item(req) => {
Poll::Ready(Ok((req, self.framed.take().unwrap())))
}
Message::Chunk(_) => unreachable!("Something is wrong"),
},
Poll::Ready(Some(Err(err))) => Poll::Ready(Err(err)),
Poll::Ready(None) => Poll::Ready(Err(ParseError::Incomplete)),
Poll::Pending => Poll::Pending,
}
}
}
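The `poll_ready` implementation above reports readiness only once both the expect service and the main service are ready, while still polling each of them so both register their wakers. The following self-contained sketch (plain `std::task::Poll` values, not actix code) illustrates just that combination rule:

use std::task::Poll;

/// Combine two readiness results: an error from either side short-circuits,
/// and the whole handler is ready only when *both* inner services are ready.
/// Both inputs are always inspected, mirroring how the handler polls the
/// expect service and the main service on every `poll_ready` call.
fn combine_readiness<E>(
    expect: Poll<Result<(), E>>,
    service: Poll<Result<(), E>>,
) -> Poll<Result<(), E>> {
    let expect_ready = match expect {
        Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
        Poll::Ready(Ok(())) => true,
        Poll::Pending => false,
    };
    let service_ready = match service {
        Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
        Poll::Ready(Ok(())) => true,
        Poll::Pending => false,
    };
    if expect_ready && service_ready {
        Poll::Ready(Ok(()))
    } else {
        Poll::Pending
    }
}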

View File

@ -0,0 +1,42 @@
use std::marker::PhantomData;
use std::task::{Context, Poll};
use actix_codec::Framed;
use actix_server_config::ServerConfig;
use actix_service::{Service, ServiceFactory};
use futures::future::Ready;
use crate::error::Error;
use crate::h1::Codec;
use crate::request::Request;
pub struct UpgradeHandler<T>(PhantomData<T>);
impl<T> ServiceFactory for UpgradeHandler<T> {
type Config = ServerConfig;
type Request = (Request, Framed<T, Codec>);
type Response = ();
type Error = Error;
type Service = UpgradeHandler<T>;
type InitError = Error;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: &ServerConfig) -> Self::Future {
unimplemented!()
}
}
impl<T> Service for UpgradeHandler<T> {
type Request = (Request, Framed<T, Codec>);
type Response = ();
type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _: &mut Context) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, _: Self::Request) -> Self::Future {
unimplemented!()
}
}

View File

@ -0,0 +1,97 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use crate::body::{BodySize, MessageBody, ResponseBody};
use crate::error::Error;
use crate::h1::{Codec, Message};
use crate::response::Response;
/// Send http/1 response
#[pin_project::pin_project]
pub struct SendResponse<T, B> {
res: Option<Message<(Response<()>, BodySize)>>,
body: Option<ResponseBody<B>>,
framed: Option<Framed<T, Codec>>,
}
impl<T, B> SendResponse<T, B>
where
B: MessageBody,
{
pub fn new(framed: Framed<T, Codec>, response: Response<B>) -> Self {
let (res, body) = response.into_parts();
SendResponse {
res: Some((res, body.size()).into()),
body: Some(body),
framed: Some(framed),
}
}
}
impl<T, B> Future for SendResponse<T, B>
where
T: AsyncRead + AsyncWrite,
B: MessageBody,
{
type Output = Result<Framed<T, Codec>, Error>;
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let this = self.get_mut();
loop {
let mut body_ready = this.body.is_some();
let framed = this.framed.as_mut().unwrap();
// send body
if this.res.is_none() && this.body.is_some() {
while body_ready && this.body.is_some() && !framed.is_write_buf_full() {
match this.body.as_mut().unwrap().poll_next(cx)? {
Poll::Ready(item) => {
// body is done
if item.is_none() {
let _ = this.body.take();
}
framed.write(Message::Chunk(item))?;
}
Poll::Pending => body_ready = false,
}
}
}
// flush write buffer
if !framed.is_write_buf_empty() {
match framed.flush(cx)? {
Poll::Ready(_) => {
if body_ready {
continue;
} else {
return Poll::Pending;
}
}
Poll::Pending => return Poll::Pending,
}
}
// send response
if let Some(res) = this.res.take() {
framed.write(res)?;
continue;
}
if this.body.is_some() {
if body_ready {
continue;
} else {
return Poll::Pending;
}
} else {
break;
}
}
Poll::Ready(Ok(this.framed.take().unwrap()))
}
}
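`SendResponse` is awaited like any other future: it takes an already-established framed HTTP/1 transport plus a response, writes the head and then the body chunks using the loop above, and yields the transport back for reuse. A hedged usage sketch follows; if appended to this module it would rely only on the imports already present, and its bounds simply mirror the `Future` impl above.

/// Hypothetical helper (illustration only): write `response` over an existing
/// HTTP/1 transport and return the framed object so the connection can be reused.
async fn send_and_reuse<T, B>(
    framed: Framed<T, Codec>,
    response: Response<B>,
) -> Result<Framed<T, Codec>, Error>
where
    T: AsyncRead + AsyncWrite,
    B: MessageBody,
{
    SendResponse::new(framed, response).await
}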

View File

@ -0,0 +1,366 @@
use std::collections::VecDeque;
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Instant;
use std::{fmt, mem, net};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::Delay;
use actix_server_config::IoStream;
use actix_service::Service;
use bitflags::bitflags;
use bytes::{Bytes, BytesMut};
use futures::{ready, Sink, Stream};
use h2::server::{Connection, SendResponse};
use h2::{RecvStream, SendStream};
use http::header::{
HeaderValue, ACCEPT_ENCODING, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
};
use http::HttpTryFrom;
use log::{debug, error, trace, warn};
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use crate::cloneable::CloneableService;
use crate::config::ServiceConfig;
use crate::error::{DispatchError, Error, ParseError, PayloadError, ResponseError};
use crate::helpers::DataFactory;
use crate::httpmessage::HttpMessage;
use crate::message::ResponseHead;
use crate::payload::Payload;
use crate::request::Request;
use crate::response::Response;
const CHUNK_SIZE: usize = 16_384;
/// Dispatcher for HTTP/2 protocol
#[pin_project::pin_project]
pub struct Dispatcher<T: IoStream, S: Service<Request = Request>, B: MessageBody> {
service: CloneableService<S>,
connection: Connection<T, Bytes>,
on_connect: Option<Box<dyn DataFactory>>,
config: ServiceConfig,
peer_addr: Option<net::SocketAddr>,
ka_expire: Instant,
ka_timer: Option<Delay>,
_t: PhantomData<B>,
}
impl<T, S, B> Dispatcher<T, S, B>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error>,
// S::Future: 'static,
S::Response: Into<Response<B>>,
B: MessageBody,
{
pub(crate) fn new(
service: CloneableService<S>,
connection: Connection<T, Bytes>,
on_connect: Option<Box<dyn DataFactory>>,
config: ServiceConfig,
timeout: Option<Delay>,
peer_addr: Option<net::SocketAddr>,
) -> Self {
// let keepalive = config.keep_alive_enabled();
// let flags = if keepalive {
// Flags::KEEPALIVE | Flags::KEEPALIVE_ENABLED
// } else {
// Flags::empty()
// };
// keep-alive timer
let (ka_expire, ka_timer) = if let Some(delay) = timeout {
(delay.deadline(), Some(delay))
} else if let Some(delay) = config.keep_alive_timer() {
(delay.deadline(), Some(delay))
} else {
(config.now(), None)
};
Dispatcher {
service,
config,
peer_addr,
connection,
on_connect,
ka_expire,
ka_timer,
_t: PhantomData,
}
}
}
impl<T, S, B> Future for Dispatcher<T, S, B>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
{
type Output = Result<(), DispatchError>;
#[inline]
fn poll(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let this = self.get_mut();
loop {
match Pin::new(&mut this.connection).poll_accept(cx) {
Poll::Ready(None) => return Poll::Ready(Ok(())),
Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err.into())),
Poll::Ready(Some(Ok((req, res)))) => {
// update keep-alive expire
if this.ka_timer.is_some() {
if let Some(expire) = this.config.keep_alive_expire() {
this.ka_expire = expire;
}
}
let (parts, body) = req.into_parts();
let mut req = Request::with_payload(Payload::<
crate::payload::PayloadStream,
>::H2(
crate::h2::Payload::new(body)
));
let head = &mut req.head_mut();
head.uri = parts.uri;
head.method = parts.method;
head.version = parts.version;
head.headers = parts.headers.into();
head.peer_addr = this.peer_addr;
// set on_connect data
if let Some(ref on_connect) = this.on_connect {
on_connect.set(&mut req.extensions_mut());
}
actix_rt::spawn(ServiceResponse::<
S::Future,
S::Response,
S::Error,
B,
> {
state: ServiceResponseState::ServiceCall(
this.service.call(req),
Some(res),
),
config: this.config.clone(),
buffer: None,
_t: PhantomData,
});
}
Poll::Pending => return Poll::Pending,
}
}
}
}
#[pin_project::pin_project]
struct ServiceResponse<F, I, E, B> {
state: ServiceResponseState<F, B>,
config: ServiceConfig,
buffer: Option<Bytes>,
_t: PhantomData<(I, E)>,
}
enum ServiceResponseState<F, B> {
ServiceCall(F, Option<SendResponse<Bytes>>),
SendPayload(SendStream<Bytes>, ResponseBody<B>),
}
impl<F, I, E, B> ServiceResponse<F, I, E, B>
where
F: Future<Output = Result<I, E>>,
E: Into<Error>,
I: Into<Response<B>>,
B: MessageBody,
{
fn prepare_response(
&self,
head: &ResponseHead,
size: &mut BodySize,
) -> http::Response<()> {
let mut has_date = false;
let mut skip_len = size != &BodySize::Stream;
let mut res = http::Response::new(());
*res.status_mut() = head.status;
*res.version_mut() = http::Version::HTTP_2;
// Content length
match head.status {
http::StatusCode::NO_CONTENT
| http::StatusCode::CONTINUE
| http::StatusCode::PROCESSING => *size = BodySize::None,
http::StatusCode::SWITCHING_PROTOCOLS => {
skip_len = true;
*size = BodySize::Stream;
}
_ => (),
}
let _ = match size {
BodySize::None | BodySize::Stream => None,
BodySize::Empty => res
.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0")),
BodySize::Sized(len) => res.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
BodySize::Sized64(len) => res.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
};
// copy headers
for (key, value) in head.headers.iter() {
match *key {
CONNECTION | TRANSFER_ENCODING => continue, // http2 specific
CONTENT_LENGTH if skip_len => continue,
DATE => has_date = true,
_ => (),
}
res.headers_mut().append(key, value.clone());
}
// set date header
if !has_date {
let mut bytes = BytesMut::with_capacity(29);
self.config.set_date_header(&mut bytes);
res.headers_mut()
.insert(DATE, HeaderValue::try_from(bytes.freeze()).unwrap());
}
res
}
}
impl<F, I, E, B> Future for ServiceResponse<F, I, E, B>
where
F: Future<Output = Result<I, E>>,
E: Into<Error>,
I: Into<Response<B>>,
B: MessageBody,
{
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
match this.state {
ServiceResponseState::ServiceCall(ref mut call, ref mut send) => {
match unsafe { Pin::new_unchecked(call) }.poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res =
self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
*this.state =
ServiceResponseState::SendPayload(stream, body);
self.poll(cx)
}
}
Poll::Pending => Poll::Pending,
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res =
self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
*this.state = ServiceResponseState::SendPayload(
stream,
body.into_body(),
);
self.poll(cx)
}
}
}
}
ServiceResponseState::SendPayload(ref mut stream, ref mut body) => loop {
loop {
if let Some(ref mut buffer) = this.buffer {
match stream.poll_capacity(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => return Poll::Ready(()),
Poll::Ready(Some(Ok(cap))) => {
let len = buffer.len();
let bytes = buffer.split_to(std::cmp::min(cap, len));
if let Err(e) = stream.send_data(bytes, false) {
warn!("{:?}", e);
return Poll::Ready(());
} else if !buffer.is_empty() {
let cap = std::cmp::min(buffer.len(), CHUNK_SIZE);
stream.reserve_capacity(cap);
} else {
this.buffer.take();
}
}
Poll::Ready(Some(Err(e))) => {
warn!("{:?}", e);
return Poll::Ready(());
}
}
} else {
match body.poll_next(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => {
if let Err(e) = stream.send_data(Bytes::new(), true) {
warn!("{:?}", e);
}
return Poll::Ready(());
}
Poll::Ready(Some(Ok(chunk))) => {
stream.reserve_capacity(std::cmp::min(
chunk.len(),
CHUNK_SIZE,
));
*this.buffer = Some(chunk);
}
Poll::Ready(Some(Err(e))) => {
error!("Response payload stream error: {:?}", e);
return Poll::Ready(());
}
}
}
}
},
}
}
}
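In the `SendPayload` branch above, every body chunk is written in slices bounded by the flow-control capacity the peer has granted, and at most `CHUNK_SIZE` bytes are reserved ahead of time. The standalone sketch below shows only that slicing rule; it uses just the `bytes` crate, and the `grants` slice is an assumption standing in for successive `poll_capacity` results.

use bytes::Bytes;

const CHUNK_SIZE: usize = 16_384;

/// Split `chunk` into the pieces that would be written as capacity arrives.
fn split_by_capacity(mut chunk: Bytes, grants: &[usize]) -> Vec<Bytes> {
    let mut out = Vec::new();
    for &cap in grants {
        if chunk.is_empty() {
            break;
        }
        // Never send more than the granted capacity or what is left.
        let take = std::cmp::min(cap, chunk.len());
        out.push(chunk.split_to(take));
        // The dispatcher would now reserve at most `CHUNK_SIZE` more bytes
        // before waiting for the next capacity grant.
        let _next_reservation = std::cmp::min(chunk.len(), CHUNK_SIZE);
    }
    out
}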

49
actix-http/src/h2/mod.rs Normal file
View File

@ -0,0 +1,49 @@
#![allow(dead_code, unused_imports)]
use std::fmt;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use bytes::Bytes;
use futures::Stream;
use h2::RecvStream;
mod dispatcher;
mod service;
pub use self::dispatcher::Dispatcher;
pub use self::service::H2Service;
use crate::error::PayloadError;
/// H2 receive stream
pub struct Payload {
pl: RecvStream,
}
impl Payload {
pub(crate) fn new(pl: RecvStream) -> Self {
Self { pl }
}
}
impl Stream for Payload {
type Item = Result<Bytes, PayloadError>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
match Pin::new(&mut this.pl).poll_data(cx) {
Poll::Ready(Some(Ok(chunk))) => {
let len = chunk.len();
if let Err(err) = this.pl.release_capacity().release_capacity(len) {
Poll::Ready(Some(Err(err.into())))
} else {
Poll::Ready(Some(Ok(chunk)))
}
}
Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(err.into()))),
Poll::Pending => Poll::Pending,
Poll::Ready(None) => Poll::Ready(None),
}
}
}
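Because this `Payload` is simply a `Stream` of `Bytes` chunks that releases h2 flow-control capacity as each chunk is polled, it can be consumed with the usual `StreamExt` combinators. A generic sketch, deliberately not tied to the h2 types:

use bytes::{Bytes, BytesMut};
use futures::{Stream, StreamExt};

/// Collect a chunked body stream (such as the h2 `Payload` above) into one
/// contiguous buffer, stopping at the first error.
async fn collect_body<S, E>(mut body: S) -> Result<Bytes, E>
where
    S: Stream<Item = Result<Bytes, E>> + Unpin,
{
    let mut buf = BytesMut::new();
    while let Some(chunk) = body.next().await {
        // Each successful chunk already released its capacity in `poll_next`,
        // so the sender is free to keep transmitting.
        buf.extend_from_slice(&chunk?);
    }
    Ok(buf.freeze())
}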

View File

@ -0,0 +1,281 @@
use std::fmt::Debug;
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{io, net, rc};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_server_config::{Io, IoStream, ServerConfig as SrvConfig};
use actix_service::{IntoServiceFactory, Service, ServiceFactory};
use bytes::Bytes;
use futures::future::{ok, Ready};
use futures::{ready, Stream};
use h2::server::{self, Connection, Handshake};
use h2::RecvStream;
use log::{error, trace};
use crate::body::MessageBody;
use crate::cloneable::CloneableService;
use crate::config::{KeepAlive, ServiceConfig};
use crate::error::{DispatchError, Error, ParseError, ResponseError};
use crate::helpers::DataFactory;
use crate::payload::Payload;
use crate::request::Request;
use crate::response::Response;
use super::dispatcher::Dispatcher;
/// `ServiceFactory` implementation for HTTP2 transport
pub struct H2Service<T, P, S, B> {
srv: S,
cfg: ServiceConfig,
on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
_t: PhantomData<(T, P, B)>,
}
impl<T, P, S, B> H2Service<T, P, S, B>
where
S: ServiceFactory<Config = SrvConfig, Request = Request>,
S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
/// Create new `HttpService` instance.
pub fn new<F: IntoServiceFactory<S>>(service: F) -> Self {
let cfg = ServiceConfig::new(KeepAlive::Timeout(5), 5000, 0);
H2Service {
cfg,
on_connect: None,
srv: service.into_factory(),
_t: PhantomData,
}
}
/// Create new `HttpService` instance with config.
pub fn with_config<F: IntoServiceFactory<S>>(
cfg: ServiceConfig,
service: F,
) -> Self {
H2Service {
cfg,
on_connect: None,
srv: service.into_factory(),
_t: PhantomData,
}
}
/// Set on connect callback.
pub(crate) fn on_connect(
mut self,
f: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
) -> Self {
self.on_connect = f;
self
}
}
impl<T, P, S, B> ServiceFactory for H2Service<T, P, S, B>
where
T: IoStream,
S: ServiceFactory<Config = SrvConfig, Request = Request>,
S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
type Config = SrvConfig;
type Request = Io<T, P>;
type Response = ();
type Error = DispatchError;
type InitError = S::InitError;
type Service = H2ServiceHandler<T, P, S::Service, B>;
type Future = H2ServiceResponse<T, P, S, B>;
fn new_service(&self, cfg: &SrvConfig) -> Self::Future {
H2ServiceResponse {
fut: self.srv.new_service(cfg),
cfg: Some(self.cfg.clone()),
on_connect: self.on_connect.clone(),
_t: PhantomData,
}
}
}
#[doc(hidden)]
#[pin_project::pin_project]
pub struct H2ServiceResponse<T, P, S: ServiceFactory, B> {
#[pin]
fut: S::Future,
cfg: Option<ServiceConfig>,
on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
_t: PhantomData<(T, P, B)>,
}
impl<T, P, S, B> Future for H2ServiceResponse<T, P, S, B>
where
T: IoStream,
S: ServiceFactory<Config = SrvConfig, Request = Request>,
S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
type Output = Result<H2ServiceHandler<T, P, S::Service, B>, S::InitError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
let this = self.as_mut().project();
Poll::Ready(ready!(this.fut.poll(cx)).map(|service| {
let this = self.as_mut().project();
H2ServiceHandler::new(
this.cfg.take().unwrap(),
this.on_connect.clone(),
service,
)
}))
}
}
/// `Service` implementation for http/2 transport
pub struct H2ServiceHandler<T, P, S, B> {
srv: CloneableService<S>,
cfg: ServiceConfig,
on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
_t: PhantomData<(T, P, B)>,
}
impl<T, P, S, B> H2ServiceHandler<T, P, S, B>
where
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
{
fn new(
cfg: ServiceConfig,
on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
srv: S,
) -> H2ServiceHandler<T, P, S, B> {
H2ServiceHandler {
cfg,
on_connect,
srv: CloneableService::new(srv),
_t: PhantomData,
}
}
}
impl<T, P, S, B> Service for H2ServiceHandler<T, P, S, B>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
{
type Request = Io<T, P>;
type Response = ();
type Error = DispatchError;
type Future = H2ServiceHandlerResponse<T, S, B>;
fn poll_ready(&mut self, cx: &mut Context) -> Poll<Result<(), Self::Error>> {
self.srv.poll_ready(cx).map_err(|e| {
let e = e.into();
error!("Service readiness error: {:?}", e);
DispatchError::Service(e)
})
}
fn call(&mut self, req: Self::Request) -> Self::Future {
let io = req.into_parts().0;
let peer_addr = io.peer_addr();
let on_connect = if let Some(ref on_connect) = self.on_connect {
Some(on_connect(&io))
} else {
None
};
H2ServiceHandlerResponse {
state: State::Handshake(
Some(self.srv.clone()),
Some(self.cfg.clone()),
peer_addr,
on_connect,
server::handshake(io),
),
}
}
}
enum State<T: IoStream, S: Service<Request = Request>, B: MessageBody>
where
S::Future: 'static,
{
Incoming(Dispatcher<T, S, B>),
Handshake(
Option<CloneableService<S>>,
Option<ServiceConfig>,
Option<net::SocketAddr>,
Option<Box<dyn DataFactory>>,
Handshake<T, Bytes>,
),
}
pub struct H2ServiceHandlerResponse<T, S, B>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
{
state: State<T, S, B>,
}
impl<T, S, B> Future for H2ServiceHandlerResponse<T, S, B>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody,
{
type Output = Result<(), DispatchError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context) -> Poll<Self::Output> {
match self.state {
State::Incoming(ref mut disp) => Pin::new(disp).poll(cx),
State::Handshake(
ref mut srv,
ref mut config,
ref peer_addr,
ref mut on_connect,
ref mut handshake,
) => match Pin::new(handshake).poll(cx) {
Poll::Ready(Ok(conn)) => {
self.state = State::Incoming(Dispatcher::new(
srv.take().unwrap(),
conn,
on_connect.take(),
config.take().unwrap(),
None,
*peer_addr,
));
self.poll(cx)
}
Poll::Ready(Err(err)) => {
trace!("H2 handshake error: {}", err);
Poll::Ready(Err(err.into()))
}
Poll::Pending => Poll::Pending,
},
}
}
}
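The `State` enum above hand-rolls a common two-phase future: finish the h2 handshake, then drive the connection dispatcher. Written with `async`/`await` the same control flow is just the sketch below; the service keeps the manual enum because `Service::Future` must name a concrete type, which an `async fn` cannot provide without boxing.

use std::future::Future;

/// Illustration only: the Handshake -> Incoming transition expressed as a
/// plain async function over hypothetical handshake and dispatch futures.
async fn handshake_then_dispatch<H, C, D, E, F>(
    handshake: H,
    dispatch: F,
) -> Result<(), E>
where
    H: Future<Output = Result<C, E>>,
    F: FnOnce(C) -> D,
    D: Future<Output = Result<(), E>>,
{
    // Equivalent to `State::Handshake`: wait for the h2 connection.
    let conn = handshake.await?;
    // Equivalent to `State::Incoming`: run the dispatcher to completion.
    dispatch(conn).await
}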

View File

@ -1,6 +1,7 @@
use mime::{self, Mime}; use mime::Mime;
use header::{QualityItem, qitem};
use http::header as http; use crate::header::{qitem, QualityItem};
use crate::http::header;
header! { header! {
/// `Accept` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.3.2) /// `Accept` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.3.2)
@ -30,13 +31,13 @@ header! {
/// ///
/// # Examples /// # Examples
/// ```rust /// ```rust
/// # extern crate actix_web; /// # extern crate actix_http;
/// extern crate mime; /// extern crate mime;
/// use actix_web::HttpResponse; /// use actix_http::Response;
/// use actix_web::http::header::{Accept, qitem}; /// use actix_http::http::header::{Accept, qitem};
/// ///
/// # fn main() { /// # fn main() {
/// let mut builder = HttpResponse::Ok(); /// let mut builder = Response::Ok();
/// ///
/// builder.set( /// builder.set(
/// Accept(vec![ /// Accept(vec![
@ -47,13 +48,13 @@ header! {
/// ``` /// ```
/// ///
/// ```rust /// ```rust
/// # extern crate actix_web; /// # extern crate actix_http;
/// extern crate mime; /// extern crate mime;
/// use actix_web::HttpResponse; /// use actix_http::Response;
/// use actix_web::http::header::{Accept, qitem}; /// use actix_http::http::header::{Accept, qitem};
/// ///
/// # fn main() { /// # fn main() {
/// let mut builder = HttpResponse::Ok(); /// let mut builder = Response::Ok();
/// ///
/// builder.set( /// builder.set(
/// Accept(vec![ /// Accept(vec![
@ -64,13 +65,13 @@ header! {
/// ``` /// ```
/// ///
/// ```rust /// ```rust
/// # extern crate actix_web; /// # extern crate actix_http;
/// extern crate mime; /// extern crate mime;
/// use actix_web::HttpResponse; /// use actix_http::Response;
/// use actix_web::http::header::{Accept, QualityItem, q, qitem}; /// use actix_http::http::header::{Accept, QualityItem, q, qitem};
/// ///
/// # fn main() { /// # fn main() {
/// let mut builder = HttpResponse::Ok(); /// let mut builder = Response::Ok();
/// ///
/// builder.set( /// builder.set(
/// Accept(vec![ /// Accept(vec![
@ -89,7 +90,7 @@ header! {
/// ); /// );
/// # } /// # }
/// ``` /// ```
(Accept, http::ACCEPT) => (QualityItem<Mime>)+ (Accept, header::ACCEPT) => (QualityItem<Mime>)+
test_accept { test_accept {
// Tests from the RFC // Tests from the RFC
@ -104,8 +105,8 @@ header! {
test2, test2,
vec![b"text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c"], vec![b"text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c"],
Some(HeaderField(vec![ Some(HeaderField(vec![
QualityItem::new(TEXT_PLAIN, q(500)), QualityItem::new(mime::TEXT_PLAIN, q(500)),
qitem(TEXT_HTML), qitem(mime::TEXT_HTML),
QualityItem::new( QualityItem::new(
"text/x-dvi".parse().unwrap(), "text/x-dvi".parse().unwrap(),
q(800)), q(800)),
@ -116,20 +117,20 @@ header! {
test3, test3,
vec![b"text/plain; charset=utf-8"], vec![b"text/plain; charset=utf-8"],
Some(Accept(vec![ Some(Accept(vec![
qitem(TEXT_PLAIN_UTF_8), qitem(mime::TEXT_PLAIN_UTF_8),
]))); ])));
test_header!( test_header!(
test4, test4,
vec![b"text/plain; charset=utf-8; q=0.5"], vec![b"text/plain; charset=utf-8; q=0.5"],
Some(Accept(vec![ Some(Accept(vec![
QualityItem::new(TEXT_PLAIN_UTF_8, QualityItem::new(mime::TEXT_PLAIN_UTF_8,
q(500)), q(500)),
]))); ])));
#[test] #[test]
fn test_fuzzing1() { fn test_fuzzing1() {
use test::TestRequest; use crate::test::TestRequest;
let req = TestRequest::with_header(super::http::ACCEPT, "chunk#;e").finish(); let req = TestRequest::with_header(crate::header::ACCEPT, "chunk#;e").finish();
let header = Accept::parse(&req); let header = Accept::parse(&req);
assert!(header.is_ok()); assert!(header.is_ok());
} }

View File

@ -1,4 +1,4 @@
use header::{ACCEPT_CHARSET, Charset, QualityItem}; use crate::header::{Charset, QualityItem, ACCEPT_CHARSET};
header! { header! {
/// `Accept-Charset` header, defined in /// `Accept-Charset` header, defined in
@ -22,24 +22,24 @@ header! {
/// ///
/// # Examples /// # Examples
/// ```rust /// ```rust
/// # extern crate actix_web; /// # extern crate actix_http;
/// use actix_web::HttpResponse; /// use actix_http::Response;
/// use actix_web::http::header::{AcceptCharset, Charset, qitem}; /// use actix_http::http::header::{AcceptCharset, Charset, qitem};
/// ///
/// # fn main() { /// # fn main() {
/// let mut builder = HttpResponse::Ok(); /// let mut builder = Response::Ok();
/// builder.set( /// builder.set(
/// AcceptCharset(vec![qitem(Charset::Us_Ascii)]) /// AcceptCharset(vec![qitem(Charset::Us_Ascii)])
/// ); /// );
/// # } /// # }
/// ``` /// ```
/// ```rust /// ```rust
/// # extern crate actix_web; /// # extern crate actix_http;
/// use actix_web::HttpResponse; /// use actix_http::Response;
/// use actix_web::http::header::{AcceptCharset, Charset, q, QualityItem}; /// use actix_http::http::header::{AcceptCharset, Charset, q, QualityItem};
/// ///
/// # fn main() { /// # fn main() {
/// let mut builder = HttpResponse::Ok(); /// let mut builder = Response::Ok();
/// builder.set( /// builder.set(
/// AcceptCharset(vec![ /// AcceptCharset(vec![
/// QualityItem::new(Charset::Us_Ascii, q(900)), /// QualityItem::new(Charset::Us_Ascii, q(900)),
@ -49,12 +49,12 @@ header! {
/// # } /// # }
/// ``` /// ```
/// ```rust /// ```rust
/// # extern crate actix_web; /// # extern crate actix_http;
/// use actix_web::HttpResponse; /// use actix_http::Response;
/// use actix_web::http::header::{AcceptCharset, Charset, qitem}; /// use actix_http::http::header::{AcceptCharset, Charset, qitem};
/// ///
/// # fn main() { /// # fn main() {
/// let mut builder = HttpResponse::Ok(); /// let mut builder = Response::Ok();
/// builder.set( /// builder.set(
/// AcceptCharset(vec![qitem(Charset::Ext("utf-8".to_owned()))]) /// AcceptCharset(vec![qitem(Charset::Ext("utf-8".to_owned()))])
/// ); /// );

View File

@ -1,5 +1,5 @@
use crate::header::{QualityItem, ACCEPT_LANGUAGE};
use language_tags::LanguageTag; use language_tags::LanguageTag;
use header::{ACCEPT_LANGUAGE, QualityItem};
header! { header! {
/// `Accept-Language` header, defined in /// `Accept-Language` header, defined in
@ -23,13 +23,13 @@ header! {
/// # Examples /// # Examples
/// ///
/// ```rust /// ```rust
/// # extern crate actix_web; /// # extern crate actix_http;
/// # extern crate language_tags; /// # extern crate language_tags;
/// use actix_web::HttpResponse; /// use actix_http::Response;
/// use actix_web::http::header::{AcceptLanguage, LanguageTag, qitem}; /// use actix_http::http::header::{AcceptLanguage, LanguageTag, qitem};
/// ///
/// # fn main() { /// # fn main() {
/// let mut builder = HttpResponse::Ok(); /// let mut builder = Response::Ok();
/// let mut langtag: LanguageTag = Default::default(); /// let mut langtag: LanguageTag = Default::default();
/// langtag.language = Some("en".to_owned()); /// langtag.language = Some("en".to_owned());
/// langtag.region = Some("US".to_owned()); /// langtag.region = Some("US".to_owned());
@ -42,13 +42,13 @@ header! {
/// ``` /// ```
/// ///
/// ```rust /// ```rust
/// # extern crate actix_web; /// # extern crate actix_http;
/// # #[macro_use] extern crate language_tags; /// # #[macro_use] extern crate language_tags;
/// use actix_web::HttpResponse; /// use actix_http::Response;
/// use actix_web::http::header::{AcceptLanguage, QualityItem, q, qitem}; /// use actix_http::http::header::{AcceptLanguage, QualityItem, q, qitem};
/// # /// #
/// # fn main() { /// # fn main() {
/// let mut builder = HttpResponse::Ok(); /// let mut builder = Response::Ok();
/// builder.set( /// builder.set(
/// AcceptLanguage(vec![ /// AcceptLanguage(vec![
/// qitem(langtag!(da)), /// qitem(langtag!(da)),

View File

@ -24,13 +24,13 @@ header! {
/// ///
/// ```rust /// ```rust
/// # extern crate http; /// # extern crate http;
/// # extern crate actix_web; /// # extern crate actix_http;
/// use actix_web::HttpResponse; /// use actix_http::Response;
/// use actix_web::http::header::Allow; /// use actix_http::http::header::Allow;
/// use http::Method; /// use http::Method;
/// ///
/// # fn main() { /// # fn main() {
/// let mut builder = HttpResponse::Ok(); /// let mut builder = Response::Ok();
/// builder.set( /// builder.set(
/// Allow(vec![Method::GET]) /// Allow(vec![Method::GET])
/// ); /// );
@ -39,13 +39,13 @@ header! {
/// ///
/// ```rust /// ```rust
/// # extern crate http; /// # extern crate http;
/// # extern crate actix_web; /// # extern crate actix_http;
/// use actix_web::HttpResponse; /// use actix_http::Response;
/// use actix_web::http::header::Allow; /// use actix_http::http::header::Allow;
/// use http::Method; /// use http::Method;
/// ///
/// # fn main() { /// # fn main() {
/// let mut builder = HttpResponse::Ok(); /// let mut builder = Response::Ok();
/// builder.set( /// builder.set(
/// Allow(vec![ /// Allow(vec![
/// Method::GET, /// Method::GET,

View File

@ -1,8 +1,11 @@
use std::fmt::{self, Write}; use std::fmt::{self, Write};
use std::str::FromStr; use std::str::FromStr;
use http::header; use http::header;
use header::{Header, IntoHeaderValue, Writer};
use header::{from_comma_delimited, fmt_comma_delimited}; use crate::header::{
fmt_comma_delimited, from_comma_delimited, Header, IntoHeaderValue, Writer,
};
/// `Cache-Control` header, defined in [RFC7234](https://tools.ietf.org/html/rfc7234#section-5.2) /// `Cache-Control` header, defined in [RFC7234](https://tools.ietf.org/html/rfc7234#section-5.2)
/// ///
@ -26,29 +29,24 @@ use header::{from_comma_delimited, fmt_comma_delimited};
/// ///
/// # Examples /// # Examples
/// ```rust /// ```rust
/// use actix_web::HttpResponse; /// use actix_http::Response;
/// use actix_web::http::header::{CacheControl, CacheDirective}; /// use actix_http::http::header::{CacheControl, CacheDirective};
/// ///
/// let mut builder = HttpResponse::Ok(); /// let mut builder = Response::Ok();
/// builder.set( /// builder.set(CacheControl(vec![CacheDirective::MaxAge(86400u32)]));
/// CacheControl(vec![CacheDirective::MaxAge(86400u32)])
/// );
/// ``` /// ```
/// ///
/// ```rust /// ```rust
/// use actix_web::HttpResponse; /// use actix_http::Response;
/// use actix_web::http::header::{CacheControl, CacheDirective}; /// use actix_http::http::header::{CacheControl, CacheDirective};
/// ///
/// let mut builder = HttpResponse::Ok(); /// let mut builder = Response::Ok();
/// builder.set( /// builder.set(CacheControl(vec![
/// CacheControl(vec![ /// CacheDirective::NoCache,
/// CacheDirective::NoCache, /// CacheDirective::Private,
/// CacheDirective::Private, /// CacheDirective::MaxAge(360u32),
/// CacheDirective::MaxAge(360u32), /// CacheDirective::Extension("foo".to_owned(), Some("bar".to_owned())),
/// CacheDirective::Extension("foo".to_owned(), /// ]));
/// Some("bar".to_owned())),
/// ])
/// );
/// ``` /// ```
#[derive(PartialEq, Clone, Debug)] #[derive(PartialEq, Clone, Debug)]
pub struct CacheControl(pub Vec<CacheDirective>); pub struct CacheControl(pub Vec<CacheDirective>);
@ -62,14 +60,15 @@ impl Header for CacheControl {
} }
#[inline] #[inline]
fn parse<T>(msg: &T) -> Result<Self, ::error::ParseError> fn parse<T>(msg: &T) -> Result<Self, crate::error::ParseError>
where T: ::HttpMessage where
T: crate::HttpMessage,
{ {
let directives = from_comma_delimited(msg.headers().get_all(Self::name()))?; let directives = from_comma_delimited(msg.headers().get_all(&Self::name()))?;
if !directives.is_empty() { if !directives.is_empty() {
Ok(CacheControl(directives)) Ok(CacheControl(directives))
} else { } else {
Err(::error::ParseError::Header) Err(crate::error::ParseError::Header)
} }
} }
} }
@ -123,32 +122,36 @@ pub enum CacheDirective {
SMaxAge(u32), SMaxAge(u32),
/// Extension directives. Optionally include an argument. /// Extension directives. Optionally include an argument.
Extension(String, Option<String>) Extension(String, Option<String>),
} }
impl fmt::Display for CacheDirective { impl fmt::Display for CacheDirective {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
use self::CacheDirective::*; use self::CacheDirective::*;
fmt::Display::fmt(match *self { fmt::Display::fmt(
NoCache => "no-cache", match *self {
NoStore => "no-store", NoCache => "no-cache",
NoTransform => "no-transform", NoStore => "no-store",
OnlyIfCached => "only-if-cached", NoTransform => "no-transform",
OnlyIfCached => "only-if-cached",
MaxAge(secs) => return write!(f, "max-age={}", secs), MaxAge(secs) => return write!(f, "max-age={}", secs),
MaxStale(secs) => return write!(f, "max-stale={}", secs), MaxStale(secs) => return write!(f, "max-stale={}", secs),
MinFresh(secs) => return write!(f, "min-fresh={}", secs), MinFresh(secs) => return write!(f, "min-fresh={}", secs),
MustRevalidate => "must-revalidate", MustRevalidate => "must-revalidate",
Public => "public", Public => "public",
Private => "private", Private => "private",
ProxyRevalidate => "proxy-revalidate", ProxyRevalidate => "proxy-revalidate",
SMaxAge(secs) => return write!(f, "s-maxage={}", secs), SMaxAge(secs) => return write!(f, "s-maxage={}", secs),
Extension(ref name, None) => &name[..], Extension(ref name, None) => &name[..],
Extension(ref name, Some(ref arg)) => return write!(f, "{}={}", name, arg), Extension(ref name, Some(ref arg)) => {
return write!(f, "{}={}", name, arg);
}, f) }
},
f,
)
} }
} }
@ -167,16 +170,20 @@ impl FromStr for CacheDirective {
"proxy-revalidate" => Ok(ProxyRevalidate), "proxy-revalidate" => Ok(ProxyRevalidate),
"" => Err(None), "" => Err(None),
_ => match s.find('=') { _ => match s.find('=') {
Some(idx) if idx+1 < s.len() => match (&s[..idx], (&s[idx+1..]).trim_matches('"')) { Some(idx) if idx + 1 < s.len() => {
("max-age" , secs) => secs.parse().map(MaxAge).map_err(Some), match (&s[..idx], (&s[idx + 1..]).trim_matches('"')) {
("max-stale", secs) => secs.parse().map(MaxStale).map_err(Some), ("max-age", secs) => secs.parse().map(MaxAge).map_err(Some),
("min-fresh", secs) => secs.parse().map(MinFresh).map_err(Some), ("max-stale", secs) => secs.parse().map(MaxStale).map_err(Some),
("s-maxage", secs) => secs.parse().map(SMaxAge).map_err(Some), ("min-fresh", secs) => secs.parse().map(MinFresh).map_err(Some),
(left, right) => Ok(Extension(left.to_owned(), Some(right.to_owned()))) ("s-maxage", secs) => secs.parse().map(SMaxAge).map_err(Some),
}, (left, right) => {
Ok(Extension(left.to_owned(), Some(right.to_owned())))
}
}
}
Some(_) => Err(None), Some(_) => Err(None),
None => Ok(Extension(s.to_owned(), None)) None => Ok(Extension(s.to_owned(), None)),
} },
} }
} }
} }
@ -184,43 +191,61 @@ impl FromStr for CacheDirective {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use header::Header; use crate::header::Header;
use test::TestRequest; use crate::test::TestRequest;
#[test] #[test]
fn test_parse_multiple_headers() { fn test_parse_multiple_headers() {
let req = TestRequest::with_header( let req = TestRequest::with_header(header::CACHE_CONTROL, "no-cache, private")
header::CACHE_CONTROL, "no-cache, private").finish(); .finish();
let cache = Header::parse(&req); let cache = Header::parse(&req);
assert_eq!(cache.ok(), Some(CacheControl(vec![CacheDirective::NoCache, assert_eq!(
CacheDirective::Private]))) cache.ok(),
Some(CacheControl(vec![
CacheDirective::NoCache,
CacheDirective::Private,
]))
)
} }
#[test] #[test]
fn test_parse_argument() { fn test_parse_argument() {
let req = TestRequest::with_header( let req =
header::CACHE_CONTROL, "max-age=100, private").finish(); TestRequest::with_header(header::CACHE_CONTROL, "max-age=100, private")
.finish();
let cache = Header::parse(&req); let cache = Header::parse(&req);
assert_eq!(cache.ok(), Some(CacheControl(vec![CacheDirective::MaxAge(100), assert_eq!(
CacheDirective::Private]))) cache.ok(),
Some(CacheControl(vec![
CacheDirective::MaxAge(100),
CacheDirective::Private,
]))
)
} }
#[test] #[test]
fn test_parse_quote_form() { fn test_parse_quote_form() {
let req = TestRequest::with_header( let req =
header::CACHE_CONTROL, "max-age=\"200\"").finish(); TestRequest::with_header(header::CACHE_CONTROL, "max-age=\"200\"").finish();
let cache = Header::parse(&req); let cache = Header::parse(&req);
assert_eq!(cache.ok(), Some(CacheControl(vec![CacheDirective::MaxAge(200)]))) assert_eq!(
cache.ok(),
Some(CacheControl(vec![CacheDirective::MaxAge(200)]))
)
} }
#[test] #[test]
fn test_parse_extension() { fn test_parse_extension() {
let req = TestRequest::with_header( let req =
header::CACHE_CONTROL, "foo, bar=baz").finish(); TestRequest::with_header(header::CACHE_CONTROL, "foo, bar=baz").finish();
let cache = Header::parse(&req); let cache = Header::parse(&req);
assert_eq!(cache.ok(), Some(CacheControl(vec![ assert_eq!(
CacheDirective::Extension("foo".to_owned(), None), cache.ok(),
CacheDirective::Extension("bar".to_owned(), Some("baz".to_owned()))]))) Some(CacheControl(vec![
CacheDirective::Extension("foo".to_owned(), None),
CacheDirective::Extension("bar".to_owned(), Some("baz".to_owned())),
]))
)
} }
#[test] #[test]

View File

@ -0,0 +1,995 @@
// # References
//
// "The Content-Disposition Header Field" https://www.ietf.org/rfc/rfc2183.txt
// "The Content-Disposition Header Field in the Hypertext Transfer Protocol (HTTP)" https://www.ietf.org/rfc/rfc6266.txt
// "Returning Values from Forms: multipart/form-data" https://www.ietf.org/rfc/rfc7578.txt
// Browser conformance tests at: http://greenbytes.de/tech/tc2231/
// IANA assignment: http://www.iana.org/assignments/cont-disp/cont-disp.xhtml
use lazy_static::lazy_static;
use regex::Regex;
use std::fmt::{self, Write};
use crate::header::{self, ExtendedValue, Header, IntoHeaderValue, Writer};
/// Split at the index of the first `needle` if it exists or at the end.
fn split_once(haystack: &str, needle: char) -> (&str, &str) {
haystack.find(needle).map_or_else(
|| (haystack, ""),
|sc| {
let (first, last) = haystack.split_at(sc);
(first, last.split_at(1).1)
},
)
}
/// Split at the index of the first `needle` if it exists or at the end, trim the right of the
/// first part and the left of the last part.
fn split_once_and_trim(haystack: &str, needle: char) -> (&str, &str) {
let (first, last) = split_once(haystack, needle);
(first.trim_end(), last.trim_start())
}
/// The implied disposition of the content of the HTTP body.
#[derive(Clone, Debug, PartialEq)]
pub enum DispositionType {
/// Inline implies default processing
Inline,
/// Attachment implies that the recipient should prompt the user to save the response locally,
/// rather than process it normally (as per its media type).
Attachment,
/// Used in *multipart/form-data* as defined in
/// [RFC7578](https://tools.ietf.org/html/rfc7578) to carry the field name and the file name.
FormData,
/// Extension type. Should be handled by recipients the same way as Attachment
Ext(String),
}
impl<'a> From<&'a str> for DispositionType {
fn from(origin: &'a str) -> DispositionType {
if origin.eq_ignore_ascii_case("inline") {
DispositionType::Inline
} else if origin.eq_ignore_ascii_case("attachment") {
DispositionType::Attachment
} else if origin.eq_ignore_ascii_case("form-data") {
DispositionType::FormData
} else {
DispositionType::Ext(origin.to_owned())
}
}
}
/// Parameter in [`ContentDisposition`].
///
/// # Examples
/// ```
/// use actix_http::http::header::DispositionParam;
///
/// let param = DispositionParam::Filename(String::from("sample.txt"));
/// assert!(param.is_filename());
/// assert_eq!(param.as_filename().unwrap(), "sample.txt");
/// ```
#[derive(Clone, Debug, PartialEq)]
#[allow(clippy::large_enum_variant)]
pub enum DispositionParam {
/// For [`DispositionType::FormData`] (i.e. *multipart/form-data*), the name of a field from
/// the form.
Name(String),
/// A plain file name.
///
/// It is [not supposed](https://tools.ietf.org/html/rfc6266#appendix-D) to contain any
/// non-ASCII characters when used in a *Content-Disposition* HTTP response header, where
/// [`FilenameExt`](DispositionParam::FilenameExt) with charset UTF-8 may be used instead
/// in case there are Unicode characters in file names.
Filename(String),
/// An extended file name. It must not exist for [`DispositionType::FormData`] according to
/// [RFC7578 Section 4.2](https://tools.ietf.org/html/rfc7578#section-4.2).
FilenameExt(ExtendedValue),
/// An unrecognized regular parameter as defined in
/// [RFC5987](https://tools.ietf.org/html/rfc5987) as *reg-parameter*, in
/// [RFC6266](https://tools.ietf.org/html/rfc6266) as *token "=" value*. Recipients should
/// ignore unrecognizable parameters.
Unknown(String, String),
/// An unrecognized extended parameter as defined in
/// [RFC5987](https://tools.ietf.org/html/rfc5987) as *ext-parameter*, in
/// [RFC6266](https://tools.ietf.org/html/rfc6266) as *ext-token "=" ext-value*. The single
/// trailing asterisk is not included. Recipients should ignore unrecognizable parameters.
UnknownExt(String, ExtendedValue),
}
impl DispositionParam {
/// Returns `true` if the parameter is [`Name`](DispositionParam::Name).
#[inline]
pub fn is_name(&self) -> bool {
self.as_name().is_some()
}
/// Returns `true` if the parameter is [`Filename`](DispositionParam::Filename).
#[inline]
pub fn is_filename(&self) -> bool {
self.as_filename().is_some()
}
/// Returns `true` if the parameter is [`FilenameExt`](DispositionParam::FilenameExt).
#[inline]
pub fn is_filename_ext(&self) -> bool {
self.as_filename_ext().is_some()
}
/// Returns `true` if the parameter is [`Unknown`](DispositionParam::Unknown) and the `name`
/// matches.
#[inline]
pub fn is_unknown<T: AsRef<str>>(&self, name: T) -> bool {
self.as_unknown(name).is_some()
}
/// Returns `true` if the parameter is [`UnknownExt`](DispositionParam::UnknownExt) and the
/// `name` matches.
#[inline]
pub fn is_unknown_ext<T: AsRef<str>>(&self, name: T) -> bool {
self.as_unknown_ext(name).is_some()
}
/// Returns the name if applicable.
#[inline]
pub fn as_name(&self) -> Option<&str> {
match self {
DispositionParam::Name(ref name) => Some(name.as_str()),
_ => None,
}
}
/// Returns the filename if applicable.
#[inline]
pub fn as_filename(&self) -> Option<&str> {
match self {
DispositionParam::Filename(ref filename) => Some(filename.as_str()),
_ => None,
}
}
/// Returns the filename* if applicable.
#[inline]
pub fn as_filename_ext(&self) -> Option<&ExtendedValue> {
match self {
DispositionParam::FilenameExt(ref value) => Some(value),
_ => None,
}
}
/// Returns the value of the unrecognized regular parameter if it is
/// [`Unknown`](DispositionParam::Unknown) and the `name` matches.
#[inline]
pub fn as_unknown<T: AsRef<str>>(&self, name: T) -> Option<&str> {
match self {
DispositionParam::Unknown(ref ext_name, ref value)
if ext_name.eq_ignore_ascii_case(name.as_ref()) =>
{
Some(value.as_str())
}
_ => None,
}
}
/// Returns the value of the unrecognized extended parameter if it is
/// [`Unknown`](DispositionParam::Unknown) and the `name` matches.
#[inline]
pub fn as_unknown_ext<T: AsRef<str>>(&self, name: T) -> Option<&ExtendedValue> {
match self {
DispositionParam::UnknownExt(ref ext_name, ref value)
if ext_name.eq_ignore_ascii_case(name.as_ref()) =>
{
Some(value)
}
_ => None,
}
}
}
/// A *Content-Disposition* header. It can be used either as
/// [a response header for the main body](https://mdn.io/Content-Disposition#As_a_response_header_for_the_main_body)
/// as (re)defined in [RFC6266](https://tools.ietf.org/html/rfc6266), or as
/// [a header for a multipart body](https://mdn.io/Content-Disposition#As_a_header_for_a_multipart_body)
/// as (re)defined in [RFC7578](https://tools.ietf.org/html/rfc7578).
///
/// In a regular HTTP response, the *Content-Disposition* response header is a header indicating if
/// the content is expected to be displayed *inline* in the browser, that is, as a Web page or as
/// part of a Web page, or as an attachment, that is downloaded and saved locally, and also can be
/// used to attach additional metadata, such as the filename to use when saving the response payload
/// locally.
///
/// In a *multipart/form-data* body, the HTTP *Content-Disposition* general header is a header that
/// can be used on the subpart of a multipart body to give information about the field it applies to.
/// The subpart is delimited by the boundary defined in the *Content-Type* header. Used on the body
/// itself, *Content-Disposition* has no effect.
///
/// # ABNF
/// ```text
/// content-disposition = "Content-Disposition" ":"
/// disposition-type *( ";" disposition-parm )
///
/// disposition-type = "inline" | "attachment" | disp-ext-type
/// ; case-insensitive
///
/// disp-ext-type = token
///
/// disposition-parm = filename-parm | disp-ext-parm
///
/// filename-parm = "filename" "=" value
/// | "filename*" "=" ext-value
///
/// disp-ext-parm = token "=" value
/// | ext-token "=" ext-value
///
/// ext-token = <the characters in token, followed by "*">
/// ```
///
/// # Note
///
/// filename is [not supposed](https://tools.ietf.org/html/rfc6266#appendix-D) to contain any
/// non-ASCII characters when used in a *Content-Disposition* HTTP response header, where
/// filename* with charset UTF-8 may be used instead in case there are Unicode characters in file
/// names.
/// filename is [acceptable](https://tools.ietf.org/html/rfc7578#section-4.2) to be UTF-8 encoded
/// directly in a *Content-Disposition* header for *multipart/form-data*, though.
///
/// filename* [must not](https://tools.ietf.org/html/rfc7578#section-4.2) be used within
/// *multipart/form-data*.
///
/// # Example
///
/// ```
/// use actix_http::http::header::{
/// Charset, ContentDisposition, DispositionParam, DispositionType,
/// ExtendedValue,
/// };
///
/// let cd1 = ContentDisposition {
/// disposition: DispositionType::Attachment,
/// parameters: vec![DispositionParam::FilenameExt(ExtendedValue {
/// charset: Charset::Iso_8859_1, // The character set for the bytes of the filename
/// language_tag: None, // The optional language tag (see `language-tag` crate)
/// value: b"\xa9 Copyright 1989.txt".to_vec(), // the actual bytes of the filename
/// })],
/// };
/// assert!(cd1.is_attachment());
/// assert!(cd1.get_filename_ext().is_some());
///
/// let cd2 = ContentDisposition {
/// disposition: DispositionType::FormData,
/// parameters: vec![
/// DispositionParam::Name(String::from("file")),
/// DispositionParam::Filename(String::from("bill.odt")),
/// ],
/// };
/// assert_eq!(cd2.get_name(), Some("file")); // field name
/// assert_eq!(cd2.get_filename(), Some("bill.odt"));
///
/// // HTTP response header with Unicode characters in file names
/// let cd3 = ContentDisposition {
/// disposition: DispositionType::Attachment,
/// parameters: vec![
/// DispositionParam::FilenameExt(ExtendedValue {
/// charset: Charset::Ext(String::from("UTF-8")),
/// language_tag: None,
/// value: String::from("\u{1f600}.svg").into_bytes(),
/// }),
/// // fallback for better compatibility
/// DispositionParam::Filename(String::from("Grinning-Face-Emoji.svg"))
/// ],
/// };
/// assert_eq!(cd3.get_filename_ext().map(|ev| ev.value.as_ref()),
/// Some("\u{1f600}.svg".as_bytes()));
/// ```
///
/// # WARN
/// If "filename" parameter is supplied, do not use the file name blindly, check and possibly
/// change to match local file system conventions if applicable, and do not use directory path
/// information that may be present. See [RFC2183](https://tools.ietf.org/html/rfc2183#section-2.3).
#[derive(Clone, Debug, PartialEq)]
pub struct ContentDisposition {
/// The disposition type
pub disposition: DispositionType,
/// Disposition parameters
pub parameters: Vec<DispositionParam>,
}
impl ContentDisposition {
/// Parse a raw Content-Disposition header value.
pub fn from_raw(hv: &header::HeaderValue) -> Result<Self, crate::error::ParseError> {
// `header::from_one_raw_str` invokes `hv.to_str` which assumes `hv` contains only visible
// ASCII characters. So `hv.as_bytes` is necessary here.
let hv = String::from_utf8(hv.as_bytes().to_vec())
.map_err(|_| crate::error::ParseError::Header)?;
let (disp_type, mut left) = split_once_and_trim(hv.as_str().trim(), ';');
if disp_type.is_empty() {
return Err(crate::error::ParseError::Header);
}
let mut cd = ContentDisposition {
disposition: disp_type.into(),
parameters: Vec::new(),
};
while !left.is_empty() {
let (param_name, new_left) = split_once_and_trim(left, '=');
if param_name.is_empty() || param_name == "*" || new_left.is_empty() {
return Err(crate::error::ParseError::Header);
}
left = new_left;
if param_name.ends_with('*') {
// extended parameters
let param_name = &param_name[..param_name.len() - 1]; // trim asterisk
let (ext_value, new_left) = split_once_and_trim(left, ';');
left = new_left;
let ext_value = header::parse_extended_value(ext_value)?;
let param = if param_name.eq_ignore_ascii_case("filename") {
DispositionParam::FilenameExt(ext_value)
} else {
DispositionParam::UnknownExt(param_name.to_owned(), ext_value)
};
cd.parameters.push(param);
} else {
// regular parameters
let value = if left.starts_with('\"') {
// quoted-string: defined in RFC6266 -> RFC2616 Section 3.6
let mut escaping = false;
let mut quoted_string = vec![];
let mut end = None;
// search for closing quote
for (i, &c) in left.as_bytes().iter().skip(1).enumerate() {
if escaping {
escaping = false;
quoted_string.push(c);
} else if c == 0x5c {
// backslash
escaping = true;
} else if c == 0x22 {
// double quote
end = Some(i + 1); // +1 because the byte iterator skipped the leading quote
break;
} else {
quoted_string.push(c);
}
}
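// `end` is the index of the closing double quote within `left` (if one was found);
// resume parsing after it, then skip past the following ';' if any.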
left = &left[end.ok_or(crate::error::ParseError::Header)? + 1..];
left = split_once(left, ';').1.trim_start();
// This should never be `Err` if the parsing above is correct.
String::from_utf8(quoted_string)
.map_err(|_| crate::error::ParseError::Header)?
} else {
// token: cannot contain a semicolon according to RFC 2616 Section 2.2
let (token, new_left) = split_once_and_trim(left, ';');
left = new_left;
if token.is_empty() {
// quoted-string can be empty, but token cannot be empty
return Err(crate::error::ParseError::Header);
}
token.to_owned()
};
let param = if param_name.eq_ignore_ascii_case("name") {
DispositionParam::Name(value)
} else if param_name.eq_ignore_ascii_case("filename") {
// See also comments in test_from_raw_uncessary_percent_decode.
DispositionParam::Filename(value)
} else {
DispositionParam::Unknown(param_name.to_owned(), value)
};
cd.parameters.push(param);
}
}
Ok(cd)
}
/// Returns `true` if it is [`Inline`](DispositionType::Inline).
pub fn is_inline(&self) -> bool {
match self.disposition {
DispositionType::Inline => true,
_ => false,
}
}
/// Returns `true` if it is [`Attachment`](DispositionType::Attachment).
pub fn is_attachment(&self) -> bool {
match self.disposition {
DispositionType::Attachment => true,
_ => false,
}
}
/// Returns `true` if it is [`FormData`](DispositionType::FormData).
pub fn is_form_data(&self) -> bool {
match self.disposition {
DispositionType::FormData => true,
_ => false,
}
}
/// Returns `true` if it is [`Ext`](DispositionType::Ext) and the `disp_type` matches.
pub fn is_ext<T: AsRef<str>>(&self, disp_type: T) -> bool {
match self.disposition {
DispositionType::Ext(ref t)
if t.eq_ignore_ascii_case(disp_type.as_ref()) =>
{
true
}
_ => false,
}
}
/// Return the value of the *name* parameter, if it exists.
pub fn get_name(&self) -> Option<&str> {
self.parameters.iter().filter_map(|p| p.as_name()).nth(0)
}
/// Return the value of the *filename* parameter, if it exists.
pub fn get_filename(&self) -> Option<&str> {
self.parameters
.iter()
.filter_map(|p| p.as_filename())
.nth(0)
}
/// Return the value of the *filename\** parameter, if it exists.
pub fn get_filename_ext(&self) -> Option<&ExtendedValue> {
self.parameters
.iter()
.filter_map(|p| p.as_filename_ext())
.nth(0)
}
/// Return the value of the unknown parameter whose name matches `name`, if it exists.
pub fn get_unknown<T: AsRef<str>>(&self, name: T) -> Option<&str> {
let name = name.as_ref();
self.parameters
.iter()
.filter_map(|p| p.as_unknown(name))
.nth(0)
}
/// Return the value of the unknown extended parameter whose name matches `name`, if it exists.
pub fn get_unknown_ext<T: AsRef<str>>(&self, name: T) -> Option<&ExtendedValue> {
let name = name.as_ref();
self.parameters
.iter()
.filter_map(|p| p.as_unknown_ext(name))
.nth(0)
}
}
impl IntoHeaderValue for ContentDisposition {
type Error = header::InvalidHeaderValueBytes;
fn try_into(self) -> Result<header::HeaderValue, Self::Error> {
let mut writer = Writer::new();
let _ = write!(&mut writer, "{}", self);
header::HeaderValue::from_shared(writer.take())
}
}
impl Header for ContentDisposition {
fn name() -> header::HeaderName {
header::CONTENT_DISPOSITION
}
fn parse<T: crate::HttpMessage>(msg: &T) -> Result<Self, crate::error::ParseError> {
if let Some(h) = msg.headers().get(&Self::name()) {
Self::from_raw(&h)
} else {
Err(crate::error::ParseError::Header)
}
}
}
impl fmt::Display for DispositionType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
DispositionType::Inline => write!(f, "inline"),
DispositionType::Attachment => write!(f, "attachment"),
DispositionType::FormData => write!(f, "form-data"),
DispositionType::Ext(ref s) => write!(f, "{}", s),
}
}
}
impl fmt::Display for DispositionParam {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// Control characters in the ranges 0x00-0x08 and 0x10-0x1F, DEL (0x7F), the double quote,
// and the backslash are escaped in quoted-string values (i.e. "foobar"); horizontal tab is
// written as-is (see `test_display_space_tab` below).
// Ref: RFC6266 S4.1 -> RFC2616 S3.6
// filename-parm = "filename" "=" value
// value = token | quoted-string
// quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
// qdtext = <any TEXT except <">>
// quoted-pair = "\" CHAR
// TEXT = <any OCTET except CTLs,
// but including LWS>
// LWS = [CRLF] 1*( SP | HT )
// OCTET = <any 8-bit sequence of data>
// CHAR = <any US-ASCII character (octets 0 - 127)>
// CTL = <any US-ASCII control character
// (octets 0 - 31) and DEL (127)>
//
// Ref: RFC7578 S4.2 -> RFC2183 S2 -> RFC2045 S5.1
// parameter := attribute "=" value
// attribute := token
// ; Matching of attributes
// ; is ALWAYS case-insensitive.
// value := token / quoted-string
// token := 1*<any (US-ASCII) CHAR except SPACE, CTLs,
// or tspecials>
// tspecials := "(" / ")" / "<" / ">" / "@" /
// "," / ";" / ":" / "\" / <">
// "/" / "[" / "]" / "?" / "="
// ; Must be in quoted-string,
// ; to use within parameter values
//
//
// See also comments in test_from_raw_uncessary_percent_decode.
lazy_static! {
static ref RE: Regex = Regex::new("[\x00-\x08\x10-\x1F\x7F\"\\\\]").unwrap();
}
match self {
DispositionParam::Name(ref value) => write!(f, "name={}", value),
DispositionParam::Filename(ref value) => {
write!(f, "filename=\"{}\"", RE.replace_all(value, "\\$0").as_ref())
}
DispositionParam::Unknown(ref name, ref value) => write!(
f,
"{}=\"{}\"",
name,
&RE.replace_all(value, "\\$0").as_ref()
),
DispositionParam::FilenameExt(ref ext_value) => {
write!(f, "filename*={}", ext_value)
}
DispositionParam::UnknownExt(ref name, ref ext_value) => {
write!(f, "{}*={}", name, ext_value)
}
}
}
}
impl fmt::Display for ContentDisposition {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.disposition)?;
self.parameters
.iter()
.map(|param| write!(f, "; {}", param))
.collect()
}
}
#[cfg(test)]
mod tests {
use super::{ContentDisposition, DispositionParam, DispositionType};
use crate::header::shared::Charset;
use crate::header::{ExtendedValue, HeaderValue};
#[test]
fn test_from_raw_basic() {
assert!(ContentDisposition::from_raw(&HeaderValue::from_static("")).is_err());
let a = HeaderValue::from_static(
"form-data; dummy=3; name=upload; filename=\"sample.png\"",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Unknown("dummy".to_owned(), "3".to_owned()),
DispositionParam::Name("upload".to_owned()),
DispositionParam::Filename("sample.png".to_owned()),
],
};
assert_eq!(a, b);
let a = HeaderValue::from_static("attachment; filename=\"image.jpg\"");
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Attachment,
parameters: vec![DispositionParam::Filename("image.jpg".to_owned())],
};
assert_eq!(a, b);
let a = HeaderValue::from_static("inline; filename=image.jpg");
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Inline,
parameters: vec![DispositionParam::Filename("image.jpg".to_owned())],
};
assert_eq!(a, b);
let a = HeaderValue::from_static(
"attachment; creation-date=\"Wed, 12 Feb 1997 16:29:51 -0500\"",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Attachment,
parameters: vec![DispositionParam::Unknown(
String::from("creation-date"),
"Wed, 12 Feb 1997 16:29:51 -0500".to_owned(),
)],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_extended() {
let a = HeaderValue::from_static(
"attachment; filename*=UTF-8''%c2%a3%20and%20%e2%82%ac%20rates",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Attachment,
parameters: vec![DispositionParam::FilenameExt(ExtendedValue {
charset: Charset::Ext(String::from("UTF-8")),
language_tag: None,
value: vec![
0xc2, 0xa3, 0x20, b'a', b'n', b'd', 0x20, 0xe2, 0x82, 0xac, 0x20,
b'r', b'a', b't', b'e', b's',
],
})],
};
assert_eq!(a, b);
let a = HeaderValue::from_static(
"attachment; filename*=UTF-8''%c2%a3%20and%20%e2%82%ac%20rates",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Attachment,
parameters: vec![DispositionParam::FilenameExt(ExtendedValue {
charset: Charset::Ext(String::from("UTF-8")),
language_tag: None,
value: vec![
0xc2, 0xa3, 0x20, b'a', b'n', b'd', 0x20, 0xe2, 0x82, 0xac, 0x20,
b'r', b'a', b't', b'e', b's',
],
})],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_extra_whitespace() {
let a = HeaderValue::from_static(
"form-data ; du-mmy= 3 ; name =upload ; filename = \"sample.png\" ; ",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Unknown("du-mmy".to_owned(), "3".to_owned()),
DispositionParam::Name("upload".to_owned()),
DispositionParam::Filename("sample.png".to_owned()),
],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_unordered() {
let a = HeaderValue::from_static(
"form-data; dummy=3; filename=\"sample.png\" ; name=upload;",
// Strictly speaking, the trailing semicolon is not compliant, but it is fine to accept.
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Unknown("dummy".to_owned(), "3".to_owned()),
DispositionParam::Filename("sample.png".to_owned()),
DispositionParam::Name("upload".to_owned()),
],
};
assert_eq!(a, b);
let a = HeaderValue::from_str(
"attachment; filename*=iso-8859-1''foo-%E4.html; filename=\"foo-ä.html\"",
)
.unwrap();
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Attachment,
parameters: vec![
DispositionParam::FilenameExt(ExtendedValue {
charset: Charset::Iso_8859_1,
language_tag: None,
value: b"foo-\xe4.html".to_vec(),
}),
DispositionParam::Filename("foo-ä.html".to_owned()),
],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_only_disp() {
let a = ContentDisposition::from_raw(&HeaderValue::from_static("attachment"))
.unwrap();
let b = ContentDisposition {
disposition: DispositionType::Attachment,
parameters: vec![],
};
assert_eq!(a, b);
let a =
ContentDisposition::from_raw(&HeaderValue::from_static("inline ;")).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Inline,
parameters: vec![],
};
assert_eq!(a, b);
let a = ContentDisposition::from_raw(&HeaderValue::from_static(
"unknown-disp-param",
))
.unwrap();
let b = ContentDisposition {
disposition: DispositionType::Ext(String::from("unknown-disp-param")),
parameters: vec![],
};
assert_eq!(a, b);
}
#[test]
fn from_raw_with_mixed_case() {
let a = HeaderValue::from_str(
"InLInE; fIlenAME*=iso-8859-1''foo-%E4.html; filEName=\"foo-ä.html\"",
)
.unwrap();
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Inline,
parameters: vec![
DispositionParam::FilenameExt(ExtendedValue {
charset: Charset::Iso_8859_1,
language_tag: None,
value: b"foo-\xe4.html".to_vec(),
}),
DispositionParam::Filename("foo-ä.html".to_owned()),
],
};
assert_eq!(a, b);
}
#[test]
fn from_raw_with_unicode() {
/* RFC7578 Section 4.2:
Some commonly deployed systems use multipart/form-data with file names directly encoded
including octets outside the US-ASCII range. The encoding used for the file names is
typically UTF-8, although HTML forms will use the charset associated with the form.
Mainstream browsers like Firefox (Gecko) and Chrome use UTF-8 directly, as described above.
(Currently, only UTF-8 is handled by this implementation.)
*/
let a =
HeaderValue::from_str("form-data; name=upload; filename=\"文件.webp\"")
.unwrap();
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Name(String::from("upload")),
DispositionParam::Filename(String::from("文件.webp")),
],
};
assert_eq!(a, b);
let a = HeaderValue::from_str(
"form-data; name=upload; filename=\"余固知謇謇之為患兮,忍而不能舍也.pptx\"",
)
.unwrap();
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Name(String::from("upload")),
DispositionParam::Filename(String::from(
"余固知謇謇之為患兮,忍而不能舍也.pptx",
)),
],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_escape() {
let a = HeaderValue::from_static(
"form-data; dummy=3; name=upload; filename=\"s\\amp\\\"le.png\"",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Unknown("dummy".to_owned(), "3".to_owned()),
DispositionParam::Name("upload".to_owned()),
DispositionParam::Filename(
['s', 'a', 'm', 'p', '\"', 'l', 'e', '.', 'p', 'n', 'g']
.iter()
.collect(),
),
],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_semicolon() {
let a =
HeaderValue::from_static("form-data; filename=\"A semicolon here;.pdf\"");
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![DispositionParam::Filename(String::from(
"A semicolon here;.pdf",
))],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_uncessary_percent_decode() {
// RFC 7578 (multipart/form-data) Sections 2 and 4.2 suggest that a filename with
// non-ASCII characters MAY be percent-encoded.
// By contrast, RFC 6266 and the other RFCs covering the Content-Disposition response
// header do not mention such percent-encoding.
// So whether to percent-decode cannot be decided without knowing the usage scenario
// (multipart/form-data vs. an HTTP response header), and decoding would mangle any
// filename that legitimately contains a literal %XX sequence in the former scenario.
// Fortunately, almost all mainstream browsers simply send UTF-8 encoded file names in
// quoted-string form (tested on Edge, IE11, Chrome and Firefox) without percent-encoding,
// so we do not attempt to percent-decode.
let a = HeaderValue::from_static(
"form-data; name=photo; filename=\"%74%65%73%74%2e%70%6e%67\"",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Name("photo".to_owned()),
DispositionParam::Filename(String::from("%74%65%73%74%2e%70%6e%67")),
],
};
assert_eq!(a, b);
let a = HeaderValue::from_static(
"form-data; name=photo; filename=\"%74%65%73%74.png\"",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Name("photo".to_owned()),
DispositionParam::Filename(String::from("%74%65%73%74.png")),
],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_param_value_missing() {
let a = HeaderValue::from_static("form-data; name=upload ; filename=");
assert!(ContentDisposition::from_raw(&a).is_err());
let a = HeaderValue::from_static("attachment; dummy=; filename=invoice.pdf");
assert!(ContentDisposition::from_raw(&a).is_err());
let a = HeaderValue::from_static("inline; filename= ");
assert!(ContentDisposition::from_raw(&a).is_err());
let a = HeaderValue::from_static("inline; filename=\"\"");
assert!(ContentDisposition::from_raw(&a).expect("parse cd").get_filename().expect("filename").is_empty());
}
#[test]
fn test_from_raw_param_name_missing() {
let a = HeaderValue::from_static("inline; =\"test.txt\"");
assert!(ContentDisposition::from_raw(&a).is_err());
let a = HeaderValue::from_static("inline; =diary.odt");
assert!(ContentDisposition::from_raw(&a).is_err());
let a = HeaderValue::from_static("inline; =");
assert!(ContentDisposition::from_raw(&a).is_err());
}
#[test]
fn test_display_extended() {
let as_string =
"attachment; filename*=UTF-8'en'%C2%A3%20and%20%E2%82%AC%20rates";
let a = HeaderValue::from_static(as_string);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let display_rendered = format!("{}", a);
assert_eq!(as_string, display_rendered);
let a = HeaderValue::from_static("attachment; filename=colourful.csv");
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let display_rendered = format!("{}", a);
assert_eq!(
"attachment; filename=\"colourful.csv\"".to_owned(),
display_rendered
);
}
#[test]
fn test_display_quote() {
let as_string = "form-data; name=upload; filename=\"Quote\\\"here.png\"";
as_string.find("\\\"").unwrap(); // ensure the escaped quote `\"` is present
let a = HeaderValue::from_static(as_string);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let display_rendered = format!("{}", a);
assert_eq!(as_string, display_rendered);
}
#[test]
fn test_display_space_tab() {
let as_string = "form-data; name=upload; filename=\"Space here.png\"";
let a = HeaderValue::from_static(as_string);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let display_rendered = format!("{}", a);
assert_eq!(as_string, display_rendered);
let a: ContentDisposition = ContentDisposition {
disposition: DispositionType::Inline,
parameters: vec![DispositionParam::Filename(String::from("Tab\there.png"))],
};
let display_rendered = format!("{}", a);
assert_eq!("inline; filename=\"Tab\x09here.png\"", display_rendered);
}
#[test]
fn test_display_control_characters() {
/* let a = "attachment; filename=\"carriage\rreturn.png\"";
let a = HeaderValue::from_static(a);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let display_rendered = format!("{}", a);
assert_eq!(
"attachment; filename=\"carriage\\\rreturn.png\"",
display_rendered
);*/
// No way to create a HeaderValue containing a carriage return.
let a: ContentDisposition = ContentDisposition {
disposition: DispositionType::Inline,
parameters: vec![DispositionParam::Filename(String::from("bell\x07.png"))],
};
let display_rendered = format!("{}", a);
assert_eq!("inline; filename=\"bell\\\x07.png\"", display_rendered);
}
#[test]
fn test_param_methods() {
let param = DispositionParam::Filename(String::from("sample.txt"));
assert!(param.is_filename());
assert_eq!(param.as_filename().unwrap(), "sample.txt");
let param = DispositionParam::Unknown(String::from("foo"), String::from("bar"));
assert!(param.is_unknown("foo"));
assert_eq!(param.as_unknown("fOo"), Some("bar"));
}
#[test]
fn test_disposition_methods() {
let cd = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Unknown("dummy".to_owned(), "3".to_owned()),
DispositionParam::Name("upload".to_owned()),
DispositionParam::Filename("sample.png".to_owned()),
],
};
assert_eq!(cd.get_name(), Some("upload"));
assert_eq!(cd.get_unknown("dummy"), Some("3"));
assert_eq!(cd.get_filename(), Some("sample.png"));
assert_eq!(cd.get_unknown_ext("dummy"), None);
assert_eq!(cd.get_unknown("duMMy"), Some("3"));
}
}

View File

@@ -1,5 +1,5 @@
+use crate::header::{QualityItem, CONTENT_LANGUAGE};
 use language_tags::LanguageTag;
-use header::{CONTENT_LANGUAGE, QualityItem};

 header! {
     /// `Content-Language` header, defined in
@@ -24,13 +24,13 @@ header! {
     /// # Examples
     ///
     /// ```rust
-    /// # extern crate actix_web;
+    /// # extern crate actix_http;
     /// # #[macro_use] extern crate language_tags;
-    /// use actix_web::HttpResponse;
-    /// # use actix_web::http::header::{ContentLanguage, qitem};
+    /// use actix_http::Response;
+    /// # use actix_http::http::header::{ContentLanguage, qitem};
     /// #
     /// # fn main() {
-    /// let mut builder = HttpResponse::Ok();
+    /// let mut builder = Response::Ok();
     /// builder.set(
     ///     ContentLanguage(vec![
     ///         qitem(langtag!(en)),
@@ -40,14 +40,14 @@ header! {
     /// ```
     ///
     /// ```rust
-    /// # extern crate actix_web;
+    /// # extern crate actix_http;
     /// # #[macro_use] extern crate language_tags;
-    /// use actix_web::HttpResponse;
-    /// # use actix_web::http::header::{ContentLanguage, qitem};
+    /// use actix_http::Response;
+    /// # use actix_http::http::header::{ContentLanguage, qitem};
     /// #
     /// # fn main() {
     ///
-    /// let mut builder = HttpResponse::Ok();
+    /// let mut builder = Response::Ok();
     /// builder.set(
     ///     ContentLanguage(vec![
     ///         qitem(langtag!(da)),

View File

@@ -1,8 +1,10 @@
 use std::fmt::{self, Display, Write};
 use std::str::FromStr;
-use error::ParseError;
-use header::{IntoHeaderValue, Writer,
-             HeaderValue, InvalidHeaderValueBytes, CONTENT_RANGE};
+
+use crate::error::ParseError;
+use crate::header::{
+    HeaderValue, IntoHeaderValue, InvalidHeaderValueBytes, Writer, CONTENT_RANGE,
+};

 header! {
     /// `Content-Range` header, defined in
@@ -69,7 +71,6 @@ header! {
     }
 }

-
 /// Content-Range, described in [RFC7233](https://tools.ietf.org/html/rfc7233#section-4.2)
 ///
 /// # ABNF
@@ -99,7 +100,7 @@ pub enum ContentRangeSpec {
         range: Option<(u64, u64)>,
         /// Total length of the instance, can be omitted if unknown
-        instance_length: Option<u64>
+        instance_length: Option<u64>,
     },

     /// Custom range, with unit not registered at IANA
@@ -108,15 +109,15 @@ pub enum ContentRangeSpec {
         unit: String,
         /// other-range-resp
-        resp: String
-    }
+        resp: String,
+    },
 }

 fn split_in_two(s: &str, separator: char) -> Option<(&str, &str)> {
     let mut iter = s.splitn(2, separator);
     match (iter.next(), iter.next()) {
         (Some(a), Some(b)) => Some((a, b)),
-        _ => None
+        _ => None,
     }
 }
@@ -126,40 +127,39 @@ impl FromStr for ContentRangeSpec {
     fn from_str(s: &str) -> Result<Self, ParseError> {
         let res = match split_in_two(s, ' ') {
             Some(("bytes", resp)) => {
-                let (range, instance_length) = split_in_two(
-                    resp, '/').ok_or(ParseError::Header)?;
+                let (range, instance_length) =
+                    split_in_two(resp, '/').ok_or(ParseError::Header)?;

                 let instance_length = if instance_length == "*" {
                     None
                 } else {
-                    Some(instance_length.parse()
-                        .map_err(|_| ParseError::Header)?)
+                    Some(instance_length.parse().map_err(|_| ParseError::Header)?)
                 };

                 let range = if range == "*" {
                     None
                 } else {
-                    let (first_byte, last_byte) = split_in_two(
-                        range, '-').ok_or(ParseError::Header)?;
-                    let first_byte = first_byte.parse()
-                        .map_err(|_| ParseError::Header)?;
-                    let last_byte = last_byte.parse()
-                        .map_err(|_| ParseError::Header)?;
+                    let (first_byte, last_byte) =
+                        split_in_two(range, '-').ok_or(ParseError::Header)?;
+                    let first_byte =
+                        first_byte.parse().map_err(|_| ParseError::Header)?;
+                    let last_byte = last_byte.parse().map_err(|_| ParseError::Header)?;
                     if last_byte < first_byte {
                         return Err(ParseError::Header);
                     }
                     Some((first_byte, last_byte))
                 };

-                ContentRangeSpec::Bytes {range, instance_length}
-            }
-            Some((unit, resp)) => {
-                ContentRangeSpec::Unregistered {
-                    unit: unit.to_owned(),
-                    resp: resp.to_owned()
-                }
+                ContentRangeSpec::Bytes {
+                    range,
+                    instance_length,
+                }
             }
-            _ => return Err(ParseError::Header)
+            Some((unit, resp)) => ContentRangeSpec::Unregistered {
+                unit: unit.to_owned(),
+                resp: resp.to_owned(),
+            },
+            _ => return Err(ParseError::Header),
         };
         Ok(res)
     }
@@ -168,17 +168,20 @@ impl FromStr for ContentRangeSpec {
 impl Display for ContentRangeSpec {
     fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
         match *self {
-            ContentRangeSpec::Bytes { range, instance_length } => {
-                try!(f.write_str("bytes "));
+            ContentRangeSpec::Bytes {
+                range,
+                instance_length,
+            } => {
+                f.write_str("bytes ")?;
                 match range {
                     Some((first_byte, last_byte)) => {
-                        try!(write!(f, "{}-{}", first_byte, last_byte));
-                    },
+                        write!(f, "{}-{}", first_byte, last_byte)?;
+                    }
                     None => {
-                        try!(f.write_str("*"));
+                        f.write_str("*")?;
                     }
                 };
-                try!(f.write_str("/"));
+                f.write_str("/")?;
                 if let Some(v) = instance_length {
                     write!(f, "{}", v)
                 } else {
@@ -186,8 +189,8 @@ impl Display for ContentRangeSpec {
                 }
             }
             ContentRangeSpec::Unregistered { ref unit, ref resp } => {
-                try!(f.write_str(unit));
-                try!(f.write_str(" "));
+                f.write_str(unit)?;
+                f.write_str(" ")?;
                 f.write_str(resp)
             }
         }
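
For reference, a minimal sketch of how the refactored `ContentRangeSpec` code above behaves (a hypothetical snippet, not part of this diff; it assumes `ContentRangeSpec` is re-exported at `actix_http::http::header` and derives `Debug`/`PartialEq`, as in the upstream crate):

fn main() {
    use actix_http::http::header::ContentRangeSpec;

    // Parse a Content-Range value via the `FromStr` impl above.
    let spec: ContentRangeSpec = "bytes 0-499/1234".parse().unwrap();
    assert_eq!(
        spec,
        ContentRangeSpec::Bytes {
            range: Some((0, 499)),
            instance_length: Some(1234),
        }
    );

    // The `Display` impl round-trips the value back into header form.
    assert_eq!(spec.to_string(), "bytes 0-499/1234");
}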

Some files were not shown because too many files have changed in this diff.