1
0
mirror of https://github.com/fafhrd91/actix-web synced 2025-07-16 14:45:47 +02:00

Compare commits

...

121 Commits

Author SHA1 Message Date
Nikolay Kim
4d17a9afcc update version 2018-10-09 11:42:52 -07:00
Nikolay Kim
65e9201b4d Fixed panic during graceful shutdown 2018-10-09 11:35:57 -07:00
Nikolay Kim
c3ad516f56 disable shutdown atm 2018-10-09 09:45:24 -07:00
Nikolay Kim
93b1c5fd46 update deps 2018-10-08 21:58:37 -07:00
Nikolay Kim
4e7fac08b9 do not override content-length header 2018-10-08 15:30:59 -07:00
Nikolay Kim
07f6ca4b71 Merge branch 'master' of github.com:actix/actix-web 2018-10-08 13:06:49 -07:00
Nikolay Kim
03d988b898 refactor date rendering 2018-10-08 10:16:19 -07:00
Nikolay Kim
cfad5bf1f3 enable slow request timeout for h2 dispatcher 2018-10-08 07:47:42 -07:00
Danil Berestov
10678a22af test content length (#532) 2018-10-06 08:17:20 +03:00
lzx
7ae5a43877 httpresponse.rs doc fix (#534) 2018-10-06 08:16:12 +03:00
Nikolay Kim
1e1a4f846e use actix-net cell features 2018-10-02 22:23:51 -07:00
Nikolay Kim
49eea3bf76 travis config 2018-10-02 20:22:51 -07:00
Nikolay Kim
b0677aa029 fix stable compatibility 2018-10-02 19:42:24 -07:00
Nikolay Kim
401ea574c0 make AcceptorTimeout::new public 2018-10-02 19:31:30 -07:00
Nikolay Kim
bbcd618304 export AcceptorTimeout 2018-10-02 19:12:08 -07:00
Nikolay Kim
1f68ce8541 fix tests 2018-10-02 19:05:58 -07:00
Nikolay Kim
2710f70e39 add H1 transport 2018-10-02 17:30:29 -07:00
Nikolay Kim
ae5c4dfb78 refactor http channels list; rename WorkerSettings 2018-10-02 15:25:32 -07:00
Nikolay Kim
d7379bd10b update server ssl tests; upgrade rustls 2018-10-02 13:41:33 -07:00
Nikolay Kim
b59712c439 add ssl handshake timeout tests 2018-10-02 11:32:43 -07:00
Nikolay Kim
724668910b fix ssh handshake timeout 2018-10-02 11:18:59 -07:00
Nikolay Kim
61c7534e03 fix stream flushing 2018-10-02 10:43:23 -07:00
Douman
f8b176de9e Fix no_http2 flag in HttpServer (#526) 2018-10-02 20:09:31 +03:00
Danil Berestov
c8505bb53f content-length bug fix (#525)
* content-length bug fix

* changes.md is updated

* typo
2018-10-02 09:15:48 -07:00
Nikolay Kim
eed377e773 uneeded dep 2018-10-02 00:20:27 -07:00
Nikolay Kim
f3ce6574e4 fix client timer and add slow request tests 2018-10-02 00:19:28 -07:00
Nikolay Kim
f007860a16 cleanup warnings 2018-10-01 22:48:11 -07:00
Nikolay Kim
fdfadb52e1 fix doc test for State 2018-10-01 22:29:30 -07:00
Nikolay Kim
368f73513a set tcp-keepalive for test as well 2018-10-01 22:25:53 -07:00
Nikolay Kim
c674ea9126 add StreamConfiguration service 2018-10-01 22:23:02 -07:00
Nikolay Kim
7c78797d9b proper stop for test_ws_stopped test 2018-10-01 21:30:00 -07:00
Nikolay Kim
84edc57fd9 increase sleep time 2018-10-01 21:19:27 -07:00
Nikolay Kim
127af92541 clippy warnings 2018-10-01 21:16:56 -07:00
Nikolay Kim
e4686f6c8d set socket linger to 0 on timeout 2018-10-01 20:53:22 -07:00
Nikolay Kim
1bac65de4c add websocket stopped test 2018-10-01 20:15:26 -07:00
Nikolay Kim
16945a554a add client shutdown timeout 2018-10-01 20:04:16 -07:00
Nikolay Kim
91af3ca148 simplify h1 dispatcher 2018-10-01 19:18:24 -07:00
Nikolay Kim
2217a152cb expose app error by http service 2018-10-01 15:19:49 -07:00
Nikolay Kim
c1e0b4f322 expose internal http server types and allow to create custom http pipelines 2018-10-01 14:43:06 -07:00
Nikolay Kim
5966ee6192 add HttpServer::register() function, allows to register services in actix net server 2018-09-28 16:03:53 -07:00
Nikolay Kim
4aac3d6a92 refactor keep-alive timer 2018-09-28 15:04:59 -07:00
Nikolay Kim
e95babf8d3 log acctor init errors 2018-09-28 12:37:20 -07:00
Nikolay Kim
f2d42e5e77 refactor acceptor error handling 2018-09-28 11:50:47 -07:00
Nikolay Kim
0f1c80ccc6 deprecate start_incoming 2018-09-28 08:45:49 -07:00
Nikolay Kim
fc5088b55e fix tarpaulin args 2018-09-28 00:08:23 -07:00
Nikolay Kim
bec37fdbd5 update travis config 2018-09-27 22:23:29 -07:00
Nikolay Kim
4b59ae2476 fix ssl config for client connector 2018-09-27 22:15:38 -07:00
Nikolay Kim
d0fc9d7b99 simplify listen_ and bind_ methods 2018-09-27 21:55:44 -07:00
Nikolay Kim
1ff86e5ac4 restore rust-tls support 2018-09-27 21:24:21 -07:00
Nikolay Kim
ecfda64f6d add native-tls support 2018-09-27 20:40:34 -07:00
Nikolay Kim
0bca21ec6d fix ssl tests 2018-09-27 19:57:40 -07:00
Nikolay Kim
3173c9fa83 diesable client timeout for tcp stream acceptor 2018-09-27 19:34:07 -07:00
Nikolay Kim
85445ea809 rename and simplify ServiceFactory trait 2018-09-27 18:33:29 -07:00
Nikolay Kim
d57579d700 refactor acceptor pipeline add client timeout 2018-09-27 18:33:29 -07:00
Nikolay Kim
b6a1cfa6ad update openssl support 2018-09-27 18:33:29 -07:00
Nikolay Kim
9f1417af30 refactor http service builder 2018-09-27 18:33:29 -07:00
Nikolay Kim
0aa0f326f7 fix changes from master 2018-09-27 18:33:29 -07:00
Nikolay Kim
dbb4fab4f7 separate mod for HttpHandler; add HttpHandler impl for Vec<H> 2018-09-27 18:33:29 -07:00
Nikolay Kim
6f3e70a92a simplify application factory 2018-09-27 18:33:29 -07:00
Nikolay Kim
a63d3f9a7a cleanup ServerFactory trait 2018-09-27 18:33:29 -07:00
Nikolay Kim
a3cfc24232 refactor acceptor service 2018-09-27 18:33:29 -07:00
Nikolay Kim
6a61138bf8 enable ssl feature 2018-09-27 18:33:29 -07:00
Nikolay Kim
7cf9af9b55 disable ssl for travis 2018-09-27 18:33:29 -07:00
Nikolay Kim
c9a52e3197 refactor date generatioin 2018-09-27 18:33:29 -07:00
Nikolay Kim
1907102685 switch to actix-net server 2018-09-27 18:33:29 -07:00
Nikolay Kim
52195bbf16 update version 2018-09-27 18:17:58 -07:00
sapir
59deb4b40d Try to separate HTTP/1 read & write disconnect handling, to fix #511. (#514) 2018-09-27 18:15:02 -07:00
Ashley
782eeb5ded Reduced unsafe converage (#520) 2018-09-26 11:56:34 +03:00
Douman
1b298142e3 Correct composing of multiple origins in cors (#518) 2018-09-21 08:45:22 +03:00
Douman
0dc96658f2 Send response to inform client of error (#515) 2018-09-21 07:24:10 +03:00
Nikolay Kim
f40153fca4 fix node::insert() method, missing next element 2018-09-17 11:39:03 -07:00
Nikolay Kim
764103566d update changes 2018-09-17 10:48:37 -07:00
Nikolay Kim
bfb2f2e9e1 fix node.remove(), update next node pointer 2018-09-17 10:25:45 -07:00
Nikolay Kim
599e6b3385 refactor channel node remove operation 2018-09-17 05:29:07 -07:00
Nikolay Kim
03e318f446 update changes 2018-09-15 17:10:53 -07:00
Nikolay Kim
7449884ce3 fix wrong error message for path deserialize for i32 #510 2018-09-15 17:09:07 -07:00
Nikolay Kim
bbe69e5b8d update version 2018-09-15 10:00:54 -07:00
Nikolay Kim
9d1eefc38f use 5 seconds keep-alive timer by default 2018-09-15 09:57:54 -07:00
Nikolay Kim
d65c72b44d use server keep-alive timer as slow request timer 2018-09-15 09:55:38 -07:00
Nikolay Kim
c3f8b5cf22 clippy warnings 2018-09-11 11:25:32 -07:00
Nikolay Kim
70a3f317d3 fix failing requests to test server #508 2018-09-11 11:24:05 -07:00
Nikolay Kim
513c8ec1ce Merge pull request #505 from Neopallium/master
Fix issue with HttpChannel linked list.
2018-09-11 11:18:33 -07:00
Robert G. Jakabosky
04608b2ea6 Update changes. 2018-09-12 00:27:15 +08:00
Robert G. Jakabosky
70b45659e2 Make Node's traverse method take a closure instead of calling shutdown on each HttpChannel. 2018-09-12 00:27:15 +08:00
Robert G. Jakabosky
e0ae6b10cd Fix bug with HttpChannel linked list. 2018-09-12 00:27:15 +08:00
Maciej Piechotka
003b05b095 Don't ignore errors in std::fmt::Debug implementations (#506) 2018-09-11 14:57:55 +03:00
Nikolay Kim
cdb57b840e prepare release 2018-09-07 20:47:54 -07:00
Nikolay Kim
002bb24b26 unhide SessionBackend and SessionImpl traits and cleanup warnings 2018-09-07 20:46:43 -07:00
Nikolay Kim
51982b3fec Merge pull request #503 from uzytkownik/route-regex
Refactor resource route parsing to allow repetition in the regexes
2018-09-07 20:19:31 -07:00
Maciej Piechotka
4251b0bc10 Refactor resource route parsing to allow repetition in the regexes 2018-09-06 08:51:55 +02:00
Nikolay Kim
42f3773bec update changes 2018-09-05 09:03:58 -07:00
Jan Michael Auer
86fdbb47a5 Fix system_exit in HttpServer (#501) 2018-09-05 10:41:23 +02:00
Nikolay Kim
4ca9fd2ad1 remove debug print 2018-09-03 22:09:12 -07:00
Nikolay Kim
f0f67072ae Read client response until eof if connection header set to close #464 2018-09-03 21:35:59 -07:00
Nikolay Kim
24d1228943 simplify handler path processing 2018-09-03 11:28:47 -07:00
Nikolay Kim
b7a73e0a4f fix Scope::handler doc test 2018-09-02 08:51:26 -07:00
Nikolay Kim
968c81e267 Handling scoped paths without leading slashes #460 2018-09-02 08:14:54 -07:00
Nikolay Kim
d5957a8466 Merge branch 'master' of https://github.com/actix/actix-web 2018-09-02 07:47:45 -07:00
Nikolay Kim
f2f05e7715 allow to register handlers on scope level #465 2018-09-02 07:47:19 -07:00
Markus Unterwaditzer
3439f55288 doc: Add example for using custom nativetls connector (#497) 2018-09-01 18:13:52 +03:00
Robert Gabriel Jakabosky
0425e2776f Fix Issue #490 (#498)
* Add failing testcase for HTTP 404 response with no reason text.

* Include canonical reason test for HTTP error responses.

* Don't send a reason for unknown status codes.
2018-09-01 12:00:32 +03:00
Nikolay Kim
6464f96f8b Merge branch 'master' of https://github.com/actix/actix-web 2018-08-31 18:56:53 -07:00
Nikolay Kim
a2b170fec9 fmt 2018-08-31 18:56:21 -07:00
Nikolay Kim
0b42cae082 update tests 2018-08-31 18:54:19 -07:00
Nikolay Kim
c313c003a4 Fix typo 2018-08-31 17:45:29 -07:00
Nikolay Kim
3fa23f5e10 update version 2018-08-31 17:25:15 -07:00
Nikolay Kim
2d51831899 handle socket read disconnect 2018-08-31 17:24:13 -07:00
Nikolay Kim
e59abfd716 Merge pull request #496 from Neopallium/master
Fix issue with 'Connection: close' in ClientRequest
2018-08-31 17:17:39 -07:00
Robert G. Jakabosky
66881d7dd1 If buffer is empty, read more data before calling parser. 2018-09-01 02:25:05 +08:00
Robert G. Jakabosky
a42a8a2321 Add some comments to clarify logic. 2018-09-01 02:15:36 +08:00
Robert G. Jakabosky
2341656173 Simplify buffer reading logic. Remove duplicate code. 2018-09-01 01:41:38 +08:00
Robert G. Jakabosky
487519acec Add client test for 'Connection: close' as reported in issue #495 2018-09-01 00:34:19 +08:00
Robert Gabriel Jakabosky
af6caa92c8 Merge branch 'master' into master 2018-09-01 00:17:34 +08:00
Robert G. Jakabosky
3ccbce6bc8 Fix issue with 'Connection: close' in ClientRequest 2018-09-01 00:08:53 +08:00
Armin Ronacher
797b52ecbf Update CHANGES.md 2018-08-29 20:58:23 +02:00
Markus Unterwaditzer
4bab50c861 Add ability to pass a custom TlsConnector (#491) 2018-08-29 20:53:31 +02:00
Nikolay Kim
5906971b6d Merge pull request #483 from Neopallium/master
Fix bug with client disconnect immediately after receiving http request.
2018-08-26 10:15:25 -07:00
Robert G. Jakabosky
8393d09a0f Fix tests. 2018-08-27 00:31:31 +08:00
Robert G. Jakabosky
c3ae9997fc Fix bug with http1 client disconnects. 2018-08-26 22:21:05 +08:00
Nikolay Kim
d39dcc58cd Merge pull request #482 from 0x1793d1/master
Fix server startup log message
2018-08-24 20:53:45 -07:00
0x1793d1
471a3e9806 Fix server startup log message 2018-08-24 23:21:32 +02:00
61 changed files with 3910 additions and 3102 deletions

View File

@@ -1,6 +1,6 @@
environment: environment:
global: global:
PROJECT_NAME: actix PROJECT_NAME: actix-web
matrix: matrix:
# Stable channel # Stable channel
- TARGET: i686-pc-windows-msvc - TARGET: i686-pc-windows-msvc
@@ -37,4 +37,5 @@ build: false
# Equivalent to Travis' `script` phase # Equivalent to Travis' `script` phase
test_script: test_script:
- cargo clean
- cargo test --no-default-features --features="flate2-rust" - cargo test --no-default-features --features="flate2-rust"

View File

@@ -30,14 +30,14 @@ before_script:
script: script:
- | - |
if [[ "$TRAVIS_RUST_VERSION" != "stable" ]]; then if [[ "$TRAVIS_RUST_VERSION" != "nightly" ]]; then
cargo clean cargo clean
cargo test --features="alpn,tls,rust-tls" -- --nocapture cargo test --features="ssl,tls,rust-tls" -- --nocapture
fi fi
- | - |
if [[ "$TRAVIS_RUST_VERSION" == "stable" ]]; then if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then
RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install -f cargo-tarpaulin RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install -f cargo-tarpaulin
cargo tarpaulin --features="alpn,tls,rust-tls" --out Xml --no-count RUST_BACKTRACE=1 cargo tarpaulin --features="ssl,tls,rust-tls" --out Xml
bash <(curl -s https://codecov.io/bash) bash <(curl -s https://codecov.io/bash)
echo "Uploaded code coverage" echo "Uploaded code coverage"
fi fi
@@ -45,8 +45,8 @@ script:
# Upload docs # Upload docs
after_success: after_success:
- | - |
if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "beta" ]]; then if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "stable" ]]; then
cargo doc --features "alpn, tls, rust-tls, session" --no-deps && cargo doc --features "ssl,tls,rust-tls,session" --no-deps &&
echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html && echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html &&
git clone https://github.com/davisp/ghp-import.git && git clone https://github.com/davisp/ghp-import.git &&
./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc && ./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc &&

View File

@@ -1,5 +1,94 @@
# Changes # Changes
## [0.7.10] - 2018-10-09
### Fixed
* Fixed panic during graceful shutdown
## [0.7.9] - 2018-10-09
### Added
* Added client shutdown timeout setting
* Added slow request timeout setting
* Respond with 408 response on slow request timeout #523
### Fixed
* HTTP1 decoding errors are reported to the client. #512
* Correctly compose multiple allowed origins in CORS. #517
* Websocket server finished() isn't called if client disconnects #511
* Responses with the following codes: 100, 101, 102, 204 -- are sent without Content-Length header. #521
* Correct usage of `no_http2` flag in `bind_*` methods. #519
## [0.7.8] - 2018-09-17
### Added
* Use server `Keep-Alive` setting as slow request timeout #439
### Changed
* Use 5 seconds keep-alive timer by default.
### Fixed
* Fixed wrong error message for i16 type #510
## [0.7.7] - 2018-09-11
### Fixed
* Fix linked list of HttpChannels #504
* Fix requests to TestServer fail #508
## [0.7.6] - 2018-09-07
### Fixed
* Fix system_exit in HttpServer #501
* Fix parsing of route param containin regexes with repetition #500
### Changes
* Unhide `SessionBackend` and `SessionImpl` traits #455
## [0.7.5] - 2018-09-04
### Added
* Added the ability to pass a custom `TlsConnector`.
* Allow to register handlers on scope level #465
### Fixed
* Handle socket read disconnect
* Handling scoped paths without leading slashes #460
### Changed
* Read client response until eof if connection header set to close #464
## [0.7.4] - 2018-08-23 ## [0.7.4] - 2018-08-23
### Added ### Added

View File

@@ -1,6 +1,6 @@
[package] [package]
name = "actix-web" name = "actix-web"
version = "0.7.4" version = "0.7.10"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"] authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust." description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust."
readme = "README.md" readme = "README.md"
@@ -17,7 +17,7 @@ exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
build = "build.rs" build = "build.rs"
[package.metadata.docs.rs] [package.metadata.docs.rs]
features = ["tls", "alpn", "rust-tls", "session", "brotli", "flate2-c"] features = ["tls", "ssl", "rust-tls", "session", "brotli", "flate2-c"]
[badges] [badges]
travis-ci = { repository = "actix/actix-web", branch = "master" } travis-ci = { repository = "actix/actix-web", branch = "master" }
@@ -29,16 +29,19 @@ name = "actix_web"
path = "src/lib.rs" path = "src/lib.rs"
[features] [features]
default = ["session", "brotli", "flate2-c"] default = ["session", "brotli", "flate2-c", "cell"]
# tls # tls
tls = ["native-tls", "tokio-tls"] tls = ["native-tls", "tokio-tls", "actix-net/tls"]
# openssl # openssl
alpn = ["openssl", "tokio-openssl"] ssl = ["openssl", "tokio-openssl", "actix-net/ssl"]
# deprecated, use "ssl"
alpn = ["openssl", "tokio-openssl", "actix-net/ssl"]
# rustls # rustls
rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots"] rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots", "actix-net/rust-tls"]
# unix sockets # unix sockets
uds = ["tokio-uds"] uds = ["tokio-uds"]
@@ -55,8 +58,11 @@ flate2-c = ["flate2/miniz-sys"]
# rust backend for flate2 crate # rust backend for flate2 crate
flate2-rust = ["flate2/rust_backend"] flate2-rust = ["flate2/rust_backend"]
cell = ["actix-net/cell"]
[dependencies] [dependencies]
actix = "0.7.0" actix = "0.7.0"
actix-net = "0.1.0"
base64 = "0.9" base64 = "0.9"
bitflags = "1.0" bitflags = "1.0"
@@ -81,6 +87,7 @@ language-tags = "0.2"
lazy_static = "1.0" lazy_static = "1.0"
lazycell = "1.0.0" lazycell = "1.0.0"
parking_lot = "0.6" parking_lot = "0.6"
serde_urlencoded = "^0.5.3"
url = { version="1.7", features=["query_encoding"] } url = { version="1.7", features=["query_encoding"] }
cookie = { version="0.11", features=["percent-encode"] } cookie = { version="0.11", features=["percent-encode"] }
brotli2 = { version="^0.3.2", optional = true } brotli2 = { version="^0.3.2", optional = true }
@@ -101,6 +108,7 @@ tokio-io = "0.1"
tokio-tcp = "0.1" tokio-tcp = "0.1"
tokio-timer = "0.2" tokio-timer = "0.2"
tokio-reactor = "0.1" tokio-reactor = "0.1"
tokio-current-thread = "0.1"
# native-tls # native-tls
native-tls = { version="0.2", optional = true } native-tls = { version="0.2", optional = true }
@@ -111,16 +119,14 @@ openssl = { version="0.10", optional = true }
tokio-openssl = { version="0.2", optional = true } tokio-openssl = { version="0.2", optional = true }
#rustls #rustls
rustls = { version = "^0.13.1", optional = true } rustls = { version = "0.14", optional = true }
tokio-rustls = { version = "^0.7.2", optional = true } tokio-rustls = { version = "0.8", optional = true }
webpki = { version = "0.18", optional = true } webpki = { version = "0.18", optional = true }
webpki-roots = { version = "0.15", optional = true } webpki-roots = { version = "0.15", optional = true }
# unix sockets # unix sockets
tokio-uds = { version="0.2", optional = true } tokio-uds = { version="0.2", optional = true }
serde_urlencoded = "^0.5.3"
[dev-dependencies] [dev-dependencies]
env_logger = "0.5" env_logger = "0.5"
serde_derive = "1.0" serde_derive = "1.0"
@@ -132,8 +138,3 @@ version_check = "0.1"
lto = true lto = true
opt-level = 3 opt-level = 3
codegen-units = 1 codegen-units = 1
[workspace]
members = [
"./",
]

View File

@@ -135,7 +135,7 @@ where
/// instance for each thread, thus application state must be constructed /// instance for each thread, thus application state must be constructed
/// multiple times. If you want to share state between different /// multiple times. If you want to share state between different
/// threads, a shared object should be used, e.g. `Arc`. Application /// threads, a shared object should be used, e.g. `Arc`. Application
/// state does not need to be `Send` and `Sync`. /// state does not need to be `Send` or `Sync`.
pub fn with_state(state: S) -> App<S> { pub fn with_state(state: S) -> App<S> {
App { App {
parts: Some(ApplicationParts { parts: Some(ApplicationParts {
@@ -447,11 +447,8 @@ where
{ {
let mut path = path.trim().trim_right_matches('/').to_owned(); let mut path = path.trim().trim_right_matches('/').to_owned();
if !path.is_empty() && !path.starts_with('/') { if !path.is_empty() && !path.starts_with('/') {
path.insert(0, '/') path.insert(0, '/');
} };
if path.len() > 1 && path.ends_with('/') {
path.pop();
}
self.parts self.parts
.as_mut() .as_mut()
.expect("Use after finish") .expect("Use after finish")

View File

@@ -16,58 +16,43 @@ use http::{Error as HttpError, HttpTryFrom, Uri};
use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::{AsyncRead, AsyncWrite};
use tokio_timer::Delay; use tokio_timer::Delay;
#[cfg(feature = "alpn")] #[cfg(any(feature = "alpn", feature = "ssl"))]
use openssl::ssl::{Error as OpensslError, SslConnector, SslMethod}; use {
#[cfg(feature = "alpn")] openssl::ssl::{Error as SslError, SslConnector, SslMethod},
use tokio_openssl::SslConnectorExt; tokio_openssl::SslConnectorExt,
};
#[cfg(all(feature = "tls", not(feature = "alpn")))] #[cfg(all(
use native_tls::{Error as TlsError, TlsConnector as NativeTlsConnector}; feature = "tls",
#[cfg(all(feature = "tls", not(feature = "alpn")))] not(any(feature = "alpn", feature = "ssl", feature = "rust-tls"))
use tokio_tls::{TlsConnector}; ))]
use {
native_tls::{Error as SslError, TlsConnector as NativeTlsConnector},
tokio_tls::TlsConnector as SslConnector,
};
#[cfg( #[cfg(all(
all( feature = "rust-tls",
feature = "rust-tls", not(any(feature = "alpn", feature = "tls", feature = "ssl"))
not(any(feature = "alpn", feature = "tls")) ))]
) use {
)] rustls::ClientConfig, std::io::Error as SslError, std::sync::Arc,
use rustls::ClientConfig; tokio_rustls::ClientConfigExt, webpki::DNSNameRef, webpki_roots,
#[cfg( };
all(
feature = "rust-tls", #[cfg(all(
not(any(feature = "alpn", feature = "tls")) feature = "rust-tls",
) not(any(feature = "alpn", feature = "tls", feature = "ssl"))
)] ))]
use std::io::Error as TLSError; type SslConnector = Arc<ClientConfig>;
#[cfg(
all( #[cfg(not(any(
feature = "rust-tls", feature = "alpn",
not(any(feature = "alpn", feature = "tls")) feature = "ssl",
) feature = "tls",
)] feature = "rust-tls"
use std::sync::Arc; )))]
#[cfg( type SslConnector = ();
all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
use tokio_rustls::ClientConfigExt;
#[cfg(
all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
use webpki::DNSNameRef;
#[cfg(
all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
use webpki_roots;
use server::IoStream; use server::IoStream;
use {HAS_OPENSSL, HAS_RUSTLS, HAS_TLS}; use {HAS_OPENSSL, HAS_RUSTLS, HAS_TLS};
@@ -173,24 +158,14 @@ pub enum ClientConnectorError {
SslIsNotSupported, SslIsNotSupported,
/// SSL error /// SSL error
#[cfg(feature = "alpn")] #[cfg(any(
feature = "tls",
feature = "alpn",
feature = "ssl",
feature = "rust-tls",
))]
#[fail(display = "{}", _0)] #[fail(display = "{}", _0)]
SslError(#[cause] OpensslError), SslError(#[cause] SslError),
/// SSL error
#[cfg(all(feature = "tls", not(feature = "alpn")))]
#[fail(display = "{}", _0)]
SslError(#[cause] TlsError),
/// SSL error
#[cfg(
all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
#[fail(display = "{}", _0)]
SslError(#[cause] TLSError),
/// Resolver error /// Resolver error
#[fail(display = "{}", _0)] #[fail(display = "{}", _0)]
@@ -242,17 +217,8 @@ impl Paused {
/// `ClientConnector` type is responsible for transport layer of a /// `ClientConnector` type is responsible for transport layer of a
/// client connection. /// client connection.
pub struct ClientConnector { pub struct ClientConnector {
#[cfg(all(feature = "alpn"))] #[allow(dead_code)]
connector: SslConnector, connector: SslConnector,
#[cfg(all(feature = "tls", not(feature = "alpn")))]
connector: TlsConnector,
#[cfg(
all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
connector: Arc<ClientConfig>,
stats: ClientConnectorStats, stats: ClientConnectorStats,
subscriber: Option<Recipient<ClientConnectorStats>>, subscriber: Option<Recipient<ClientConnectorStats>>,
@@ -293,76 +259,47 @@ impl SystemService for ClientConnector {}
impl Default for ClientConnector { impl Default for ClientConnector {
fn default() -> ClientConnector { fn default() -> ClientConnector {
#[cfg(all(feature = "alpn"))] let connector = {
{ #[cfg(all(any(feature = "alpn", feature = "ssl")))]
let builder = SslConnector::builder(SslMethod::tls()).unwrap(); {
ClientConnector::with_connector(builder.build()) SslConnector::builder(SslMethod::tls()).unwrap().build()
}
#[cfg(all(feature = "tls", not(feature = "alpn")))]
{
let (tx, rx) = mpsc::unbounded();
let builder = NativeTlsConnector::builder();
ClientConnector {
stats: ClientConnectorStats::default(),
subscriber: None,
acq_tx: tx,
acq_rx: Some(rx),
resolver: None,
connector: builder.build().unwrap().into(),
conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15),
limit: 100,
limit_per_host: 0,
acquired: 0,
acquired_per_host: HashMap::new(),
available: HashMap::new(),
to_close: Vec::new(),
waiters: Some(HashMap::new()),
wait_timeout: None,
paused: Paused::No,
} }
}
#[cfg(
all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
)
)]
{
let mut config = ClientConfig::new();
config
.root_store
.add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS);
ClientConnector::with_connector(config)
}
#[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))] #[cfg(all(
{ feature = "tls",
let (tx, rx) = mpsc::unbounded(); not(any(feature = "alpn", feature = "ssl", feature = "rust-tls"))
ClientConnector { ))]
stats: ClientConnectorStats::default(), {
subscriber: None, NativeTlsConnector::builder().build().unwrap().into()
acq_tx: tx,
acq_rx: Some(rx),
resolver: None,
conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15),
limit: 100,
limit_per_host: 0,
acquired: 0,
acquired_per_host: HashMap::new(),
available: HashMap::new(),
to_close: Vec::new(),
waiters: Some(HashMap::new()),
wait_timeout: None,
paused: Paused::No,
} }
}
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls", feature = "ssl"))
))]
{
let mut config = ClientConfig::new();
config
.root_store
.add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS);
Arc::new(config)
}
#[cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(any(
feature = "alpn", feature = "ssl", feature = "tls", feature = "rust-tls")))]
{
()
}
};
#[cfg_attr(feature = "cargo-clippy", allow(clippy::let_unit_value))]
ClientConnector::with_connector_impl(connector)
} }
} }
impl ClientConnector { impl ClientConnector {
#[cfg(feature = "alpn")] #[cfg(any(feature = "alpn", feature = "ssl"))]
/// Create `ClientConnector` actor with custom `SslConnector` instance. /// Create `ClientConnector` actor with custom `SslConnector` instance.
/// ///
/// By default `ClientConnector` uses very a simple SSL configuration. /// By default `ClientConnector` uses very a simple SSL configuration.
@@ -375,7 +312,6 @@ impl ClientConnector {
/// # extern crate futures; /// # extern crate futures;
/// # use futures::{future, Future}; /// # use futures::{future, Future};
/// # use std::io::Write; /// # use std::io::Write;
/// # use std::process;
/// # use actix_web::actix::Actor; /// # use actix_web::actix::Actor;
/// extern crate openssl; /// extern crate openssl;
/// use actix_web::{actix, client::ClientConnector, client::Connect}; /// use actix_web::{actix, client::ClientConnector, client::Connect};
@@ -402,35 +338,14 @@ impl ClientConnector {
/// } /// }
/// ``` /// ```
pub fn with_connector(connector: SslConnector) -> ClientConnector { pub fn with_connector(connector: SslConnector) -> ClientConnector {
let (tx, rx) = mpsc::unbounded(); // keep level of indirection for docstrings matching featureflags
Self::with_connector_impl(connector)
ClientConnector {
connector,
stats: ClientConnectorStats::default(),
subscriber: None,
acq_tx: tx,
acq_rx: Some(rx),
resolver: None,
conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15),
limit: 100,
limit_per_host: 0,
acquired: 0,
acquired_per_host: HashMap::new(),
available: HashMap::new(),
to_close: Vec::new(),
waiters: Some(HashMap::new()),
wait_timeout: None,
paused: Paused::No,
}
} }
#[cfg( #[cfg(all(
all( feature = "rust-tls",
feature = "rust-tls", not(any(feature = "alpn", feature = "ssl", feature = "tls"))
not(any(feature = "alpn", feature = "tls")) ))]
)
)]
/// Create `ClientConnector` actor with custom `SslConnector` instance. /// Create `ClientConnector` actor with custom `SslConnector` instance.
/// ///
/// By default `ClientConnector` uses very a simple SSL configuration. /// By default `ClientConnector` uses very a simple SSL configuration.
@@ -441,10 +356,8 @@ impl ClientConnector {
/// # #![cfg(feature = "rust-tls")] /// # #![cfg(feature = "rust-tls")]
/// # extern crate actix_web; /// # extern crate actix_web;
/// # extern crate futures; /// # extern crate futures;
/// # extern crate tokio;
/// # use futures::{future, Future}; /// # use futures::{future, Future};
/// # use std::io::Write; /// # use std::io::Write;
/// # use std::process;
/// # use actix_web::actix::Actor; /// # use actix_web::actix::Actor;
/// extern crate rustls; /// extern crate rustls;
/// extern crate webpki_roots; /// extern crate webpki_roots;
@@ -476,10 +389,61 @@ impl ClientConnector {
/// } /// }
/// ``` /// ```
pub fn with_connector(connector: ClientConfig) -> ClientConnector { pub fn with_connector(connector: ClientConfig) -> ClientConnector {
// keep level of indirection for docstrings matching featureflags
Self::with_connector_impl(Arc::new(connector))
}
#[cfg(all(
feature = "tls",
not(any(feature = "ssl", feature = "alpn", feature = "rust-tls"))
))]
/// Create `ClientConnector` actor with custom `SslConnector` instance.
///
/// By default `ClientConnector` uses very a simple SSL configuration.
/// With `with_connector` method it is possible to use a custom
/// `SslConnector` object.
///
/// ```rust
/// # #![cfg(feature = "tls")]
/// # extern crate actix_web;
/// # extern crate futures;
/// # use futures::{future, Future};
/// # use std::io::Write;
/// # use actix_web::actix::Actor;
/// extern crate native_tls;
/// extern crate webpki_roots;
/// use native_tls::TlsConnector;
/// use actix_web::{actix, client::ClientConnector, client::Connect};
///
/// fn main() {
/// actix::run(|| {
/// let connector = TlsConnector::new().unwrap();
/// let conn = ClientConnector::with_connector(connector.into()).start();
///
/// conn.send(
/// Connect::new("https://www.rust-lang.org").unwrap()) // <- connect to host
/// .map_err(|_| ())
/// .and_then(|res| {
/// if let Ok(mut stream) = res {
/// stream.write_all(b"GET / HTTP/1.0\r\n\r\n").unwrap();
/// }
/// # actix::System::current().stop();
/// Ok(())
/// })
/// });
/// }
/// ```
pub fn with_connector(connector: SslConnector) -> ClientConnector {
// keep level of indirection for docstrings matching featureflags
Self::with_connector_impl(connector)
}
#[inline]
fn with_connector_impl(connector: SslConnector) -> ClientConnector {
let (tx, rx) = mpsc::unbounded(); let (tx, rx) = mpsc::unbounded();
ClientConnector { ClientConnector {
connector: Arc::new(connector), connector,
stats: ClientConnectorStats::default(), stats: ClientConnectorStats::default(),
subscriber: None, subscriber: None,
acq_tx: tx, acq_tx: tx,
@@ -769,7 +733,7 @@ impl ClientConnector {
act.release_key(&key2); act.release_key(&key2);
() ()
}).and_then(move |res, act, _| { }).and_then(move |res, act, _| {
#[cfg(feature = "alpn")] #[cfg(any(feature = "alpn", feature = "ssl"))]
match res { match res {
Err(err) => { Err(err) => {
let _ = waiter.tx.send(Err(err.into())); let _ = waiter.tx.send(Err(err.into()));
@@ -811,7 +775,7 @@ impl ClientConnector {
} }
} }
#[cfg(all(feature = "tls", not(feature = "alpn")))] #[cfg(all(feature = "tls", not(any(feature = "alpn", feature = "ssl"))))]
match res { match res {
Err(err) => { Err(err) => {
let _ = waiter.tx.send(Err(err.into())); let _ = waiter.tx.send(Err(err.into()));
@@ -853,12 +817,10 @@ impl ClientConnector {
} }
} }
#[cfg( #[cfg(all(
all( feature = "rust-tls",
feature = "rust-tls", not(any(feature = "alpn", feature = "ssl", feature = "tls"))
not(any(feature = "alpn", feature = "tls")) ))]
)
)]
match res { match res {
Err(err) => { Err(err) => {
let _ = waiter.tx.send(Err(err.into())); let _ = waiter.tx.send(Err(err.into()));
@@ -901,7 +863,12 @@ impl ClientConnector {
} }
} }
#[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))] #[cfg(not(any(
feature = "alpn",
feature = "ssl",
feature = "tls",
feature = "rust-tls"
)))]
match res { match res {
Err(err) => { Err(err) => {
let _ = waiter.tx.send(Err(err.into())); let _ = waiter.tx.send(Err(err.into()));
@@ -1317,6 +1284,11 @@ impl IoStream for Connection {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> { fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
IoStream::set_linger(&mut *self.stream, dur) IoStream::set_linger(&mut *self.stream, dur)
} }
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
IoStream::set_keepalive(&mut *self.stream, dur)
}
} }
impl io::Read for Connection { impl io::Read for Connection {
@@ -1344,7 +1316,7 @@ impl AsyncWrite for Connection {
} }
#[cfg(feature = "tls")] #[cfg(feature = "tls")]
use tokio_tls::{TlsStream}; use tokio_tls::TlsStream;
#[cfg(feature = "tls")] #[cfg(feature = "tls")]
/// This is temp solution untile actix-net migration /// This is temp solution untile actix-net migration
@@ -1364,4 +1336,9 @@ impl<Io: IoStream> IoStream for TlsStream<Io> {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> { fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur) self.get_mut().get_mut().set_linger(dur)
} }
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_keepalive(dur)
}
}

View File

@@ -20,6 +20,7 @@ const MAX_HEADERS: usize = 96;
#[derive(Default)] #[derive(Default)]
pub struct HttpResponseParser { pub struct HttpResponseParser {
decoder: Option<EncodingDecoder>, decoder: Option<EncodingDecoder>,
eof: bool, // indicate that we read payload until stream eof
} }
#[derive(Debug, Fail)] #[derive(Debug, Fail)]
@@ -38,43 +39,42 @@ impl HttpResponseParser {
where where
T: IoStream, T: IoStream,
{ {
// if buf is empty parse_message will always return NotReady, let's avoid that loop {
if buf.is_empty() { // Don't call parser until we have data to parse.
if !buf.is_empty() {
match HttpResponseParser::parse_message(buf)
.map_err(HttpResponseParserError::Error)?
{
Async::Ready((msg, info)) => {
if let Some((decoder, eof)) = info {
self.eof = eof;
self.decoder = Some(decoder);
} else {
self.eof = false;
self.decoder = None;
}
return Ok(Async::Ready(msg));
}
Async::NotReady => {
if buf.capacity() >= MAX_BUFFER_SIZE {
return Err(HttpResponseParserError::Error(
ParseError::TooLarge,
));
}
// Parser needs more data.
}
}
}
// Read some more data into the buffer for the parser.
match io.read_available(buf) { match io.read_available(buf) {
Ok(Async::Ready(true)) => { Ok(Async::Ready((false, true))) => {
return Err(HttpResponseParserError::Disconnect) return Err(HttpResponseParserError::Disconnect)
} }
Ok(Async::Ready(false)) => (), Ok(Async::Ready(_)) => (),
Ok(Async::NotReady) => return Ok(Async::NotReady), Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(err) => return Err(HttpResponseParserError::Error(err.into())), Err(err) => return Err(HttpResponseParserError::Error(err.into())),
} }
} }
loop {
match HttpResponseParser::parse_message(buf)
.map_err(HttpResponseParserError::Error)?
{
Async::Ready((msg, decoder)) => {
self.decoder = decoder;
return Ok(Async::Ready(msg));
}
Async::NotReady => {
if buf.capacity() >= MAX_BUFFER_SIZE {
return Err(HttpResponseParserError::Error(ParseError::TooLarge));
}
match io.read_available(buf) {
Ok(Async::Ready(true)) => {
return Err(HttpResponseParserError::Disconnect)
}
Ok(Async::Ready(false)) => (),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(err) => {
return Err(HttpResponseParserError::Error(err.into()))
}
}
}
}
}
} }
pub fn parse_payload<T>( pub fn parse_payload<T>(
@@ -87,8 +87,8 @@ impl HttpResponseParser {
loop { loop {
// read payload // read payload
let (not_ready, stream_finished) = match io.read_available(buf) { let (not_ready, stream_finished) = match io.read_available(buf) {
Ok(Async::Ready(true)) => (false, true), Ok(Async::Ready((_, true))) => (false, true),
Ok(Async::Ready(false)) => (false, false), Ok(Async::Ready((_, false))) => (false, false),
Ok(Async::NotReady) => (true, false), Ok(Async::NotReady) => (true, false),
Err(err) => return Err(err.into()), Err(err) => return Err(err.into()),
}; };
@@ -104,7 +104,12 @@ impl HttpResponseParser {
return Ok(Async::NotReady); return Ok(Async::NotReady);
} }
if stream_finished { if stream_finished {
return Err(PayloadError::Incomplete); // read untile eof?
if self.eof {
return Ok(Async::Ready(None));
} else {
return Err(PayloadError::Incomplete);
}
} }
} }
Err(err) => return Err(err.into()), Err(err) => return Err(err.into()),
@@ -117,7 +122,7 @@ impl HttpResponseParser {
fn parse_message( fn parse_message(
buf: &mut BytesMut, buf: &mut BytesMut,
) -> Poll<(ClientResponse, Option<EncodingDecoder>), ParseError> { ) -> Poll<(ClientResponse, Option<(EncodingDecoder, bool)>), ParseError> {
// Unsafe: we read only this data only after httparse parses headers into. // Unsafe: we read only this data only after httparse parses headers into.
// performance bump for pipeline benchmarks. // performance bump for pipeline benchmarks.
let mut headers: [HeaderIndex; MAX_HEADERS] = unsafe { mem::uninitialized() }; let mut headers: [HeaderIndex; MAX_HEADERS] = unsafe { mem::uninitialized() };
@@ -163,12 +168,12 @@ impl HttpResponseParser {
} }
let decoder = if status == StatusCode::SWITCHING_PROTOCOLS { let decoder = if status == StatusCode::SWITCHING_PROTOCOLS {
Some(EncodingDecoder::eof()) Some((EncodingDecoder::eof(), true))
} else if let Some(len) = hdrs.get(header::CONTENT_LENGTH) { } else if let Some(len) = hdrs.get(header::CONTENT_LENGTH) {
// Content-Length // Content-Length
if let Ok(s) = len.to_str() { if let Ok(s) = len.to_str() {
if let Ok(len) = s.parse::<u64>() { if let Ok(len) = s.parse::<u64>() {
Some(EncodingDecoder::length(len)) Some((EncodingDecoder::length(len), false))
} else { } else {
debug!("illegal Content-Length: {:?}", len); debug!("illegal Content-Length: {:?}", len);
return Err(ParseError::Header); return Err(ParseError::Header);
@@ -179,7 +184,18 @@ impl HttpResponseParser {
} }
} else if chunked(&hdrs)? { } else if chunked(&hdrs)? {
// Chunked encoding // Chunked encoding
Some(EncodingDecoder::chunked()) Some((EncodingDecoder::chunked(), false))
} else if let Some(value) = hdrs.get(header::CONNECTION) {
let close = if let Ok(s) = value.to_str() {
s == "close"
} else {
false
};
if close {
Some((EncodingDecoder::eof(), true))
} else {
None
}
} else { } else {
None None
}; };

View File

@@ -254,16 +254,16 @@ impl ClientRequest {
impl fmt::Debug for ClientRequest { impl fmt::Debug for ClientRequest {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let res = writeln!( writeln!(
f, f,
"\nClientRequest {:?} {}:{}", "\nClientRequest {:?} {}:{}",
self.version, self.method, self.uri self.version, self.method, self.uri
); )?;
let _ = writeln!(f, " headers:"); writeln!(f, " headers:")?;
for (key, val) in self.headers.iter() { for (key, val) in self.headers.iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val); writeln!(f, " {:?}: {:?}", key, val)?;
} }
res Ok(())
} }
} }
@@ -750,16 +750,16 @@ fn parts<'a>(
impl fmt::Debug for ClientRequestBuilder { impl fmt::Debug for ClientRequestBuilder {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(ref parts) = self.request { if let Some(ref parts) = self.request {
let res = writeln!( writeln!(
f, f,
"\nClientRequestBuilder {:?} {}:{}", "\nClientRequestBuilder {:?} {}:{}",
parts.version, parts.method, parts.uri parts.version, parts.method, parts.uri
); )?;
let _ = writeln!(f, " headers:"); writeln!(f, " headers:")?;
for (key, val) in parts.headers.iter() { for (key, val) in parts.headers.iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val); writeln!(f, " {:?}: {:?}", key, val)?;
} }
res Ok(())
} else { } else {
write!(f, "ClientRequestBuilder(Consumed)") write!(f, "ClientRequestBuilder(Consumed)")
} }

View File

@@ -95,12 +95,12 @@ impl ClientResponse {
impl fmt::Debug for ClientResponse { impl fmt::Debug for ClientResponse {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let res = writeln!(f, "\nClientResponse {:?} {}", self.version(), self.status()); writeln!(f, "\nClientResponse {:?} {}", self.version(), self.status())?;
let _ = writeln!(f, " headers:"); writeln!(f, " headers:")?;
for (key, val) in self.headers().iter() { for (key, val) in self.headers().iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val); writeln!(f, " {:?}: {:?}", key, val)?;
} }
res Ok(())
} }
} }

View File

@@ -1,4 +1,7 @@
#![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))] #![cfg_attr(
feature = "cargo-clippy",
allow(clippy::redundant_field_names)
)]
use std::cell::RefCell; use std::cell::RefCell;
use std::fmt::Write as FmtWrite; use std::fmt::Write as FmtWrite;

View File

@@ -175,7 +175,7 @@ impl<'de, S: 'de> Deserializer<'de> for PathDeserializer<'de, S> {
parse_single_value!(deserialize_bool, visit_bool, "bool"); parse_single_value!(deserialize_bool, visit_bool, "bool");
parse_single_value!(deserialize_i8, visit_i8, "i8"); parse_single_value!(deserialize_i8, visit_i8, "i8");
parse_single_value!(deserialize_i16, visit_i16, "i16"); parse_single_value!(deserialize_i16, visit_i16, "i16");
parse_single_value!(deserialize_i32, visit_i32, "i16"); parse_single_value!(deserialize_i32, visit_i32, "i32");
parse_single_value!(deserialize_i64, visit_i64, "i64"); parse_single_value!(deserialize_i64, visit_i64, "i64");
parse_single_value!(deserialize_u8, visit_u8, "u8"); parse_single_value!(deserialize_u8, visit_u8, "u8");
parse_single_value!(deserialize_u16, visit_u16, "u16"); parse_single_value!(deserialize_u16, visit_u16, "u16");

View File

@@ -31,6 +31,7 @@ impl Hasher for IdHasher {
type AnyMap = HashMap<TypeId, Box<Any>, BuildHasherDefault<IdHasher>>; type AnyMap = HashMap<TypeId, Box<Any>, BuildHasherDefault<IdHasher>>;
#[derive(Default)]
/// A type map of request extensions. /// A type map of request extensions.
pub struct Extensions { pub struct Extensions {
map: AnyMap, map: AnyMap,

View File

@@ -530,8 +530,7 @@ where
/// } /// }
/// ///
/// /// extract path info using serde /// /// extract path info using serde
/// fn index(data: (State<MyApp>, Path<Info>)) -> String { /// fn index(state: State<MyApp>, path: Path<Info>) -> String {
/// let (state, path) = data;
/// format!("{} {}!", state.msg, path.username) /// format!("{} {}!", state.msg, path.username)
/// } /// }
/// ///

View File

@@ -354,24 +354,24 @@ impl<S> FromRequest<S> for HttpRequest<S> {
impl<S> fmt::Debug for HttpRequest<S> { impl<S> fmt::Debug for HttpRequest<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let res = writeln!( writeln!(
f, f,
"\nHttpRequest {:?} {}:{}", "\nHttpRequest {:?} {}:{}",
self.version(), self.version(),
self.method(), self.method(),
self.path() self.path()
); )?;
if !self.query_string().is_empty() { if !self.query_string().is_empty() {
let _ = writeln!(f, " query: ?{:?}", self.query_string()); writeln!(f, " query: ?{:?}", self.query_string())?;
} }
if !self.match_info().is_empty() { if !self.match_info().is_empty() {
let _ = writeln!(f, " params: {:?}", self.match_info()); writeln!(f, " params: {:?}", self.match_info())?;
} }
let _ = writeln!(f, " headers:"); writeln!(f, " headers:")?;
for (key, val) in self.headers().iter() { for (key, val) in self.headers().iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val); writeln!(f, " {:?}: {:?}", key, val)?;
} }
res Ok(())
} }
} }

View File

@@ -272,7 +272,7 @@ impl HttpResponse {
self.get_mut().response_size = size; self.get_mut().response_size = size;
} }
/// Set write buffer capacity /// Get write buffer capacity
pub fn write_buffer_capacity(&self) -> usize { pub fn write_buffer_capacity(&self) -> usize {
self.get_ref().write_capacity self.get_ref().write_capacity
} }
@@ -694,7 +694,7 @@ impl HttpResponseBuilder {
} }
#[inline] #[inline]
#[cfg_attr(feature = "cargo-clippy", allow(borrowed_box))] #[cfg_attr(feature = "cargo-clippy", allow(clippy::borrowed_box))]
fn parts<'a>( fn parts<'a>(
parts: &'a mut Option<Box<InnerHttpResponse>>, err: &Option<HttpError>, parts: &'a mut Option<Box<InnerHttpResponse>>, err: &Option<HttpError>,
) -> Option<&'a mut Box<InnerHttpResponse>> { ) -> Option<&'a mut Box<InnerHttpResponse>> {

View File

@@ -16,7 +16,10 @@ pub struct ConnectionInfo {
impl ConnectionInfo { impl ConnectionInfo {
/// Create *ConnectionInfo* instance for a request. /// Create *ConnectionInfo* instance for a request.
#[cfg_attr(feature = "cargo-clippy", allow(cyclomatic_complexity))] #[cfg_attr(
feature = "cargo-clippy",
allow(clippy::cyclomatic_complexity)
)]
pub fn update(&mut self, req: &Request) { pub fn update(&mut self, req: &Request) {
let mut host = None; let mut host = None;
let mut scheme = None; let mut scheme = None;

View File

@@ -64,8 +64,8 @@
//! ## Package feature //! ## Package feature
//! //!
//! * `tls` - enables ssl support via `native-tls` crate //! * `tls` - enables ssl support via `native-tls` crate
//! * `alpn` - enables ssl support via `openssl` crate, require for `http/2` //! * `ssl` - enables ssl support via `openssl` crate, supports `http/2`
//! support //! * `rust-tls` - enables ssl support via `rustls` crate, supports `http/2`
//! * `uds` - enables support for making client requests via Unix Domain Sockets. //! * `uds` - enables support for making client requests via Unix Domain Sockets.
//! Unix only. Not necessary for *serving* requests. //! Unix only. Not necessary for *serving* requests.
//! * `session` - enables session support, includes `ring` crate as //! * `session` - enables session support, includes `ring` crate as
@@ -80,11 +80,8 @@
#![cfg_attr(actix_nightly, feature( #![cfg_attr(actix_nightly, feature(
specialization, // for impl ErrorResponse for std::error::Error specialization, // for impl ErrorResponse for std::error::Error
extern_prelude, extern_prelude,
tool_lints,
))] ))]
#![cfg_attr(
feature = "cargo-clippy",
allow(decimal_literal_representation, suspicious_arithmetic_impl)
)]
#![warn(missing_docs)] #![warn(missing_docs)]
#[macro_use] #[macro_use]
@@ -118,6 +115,7 @@ extern crate parking_lot;
extern crate rand; extern crate rand;
extern crate slab; extern crate slab;
extern crate tokio; extern crate tokio;
extern crate tokio_current_thread;
extern crate tokio_io; extern crate tokio_io;
extern crate tokio_reactor; extern crate tokio_reactor;
extern crate tokio_tcp; extern crate tokio_tcp;
@@ -139,6 +137,8 @@ extern crate serde_urlencoded;
extern crate percent_encoding; extern crate percent_encoding;
extern crate serde_json; extern crate serde_json;
extern crate smallvec; extern crate smallvec;
extern crate actix_net;
#[macro_use] #[macro_use]
extern crate actix as actix_inner; extern crate actix as actix_inner;

View File

@@ -826,8 +826,8 @@ impl<S: 'static> CorsBuilder<S> {
if let AllOrSome::Some(ref origins) = cors.origins { if let AllOrSome::Some(ref origins) = cors.origins {
let s = origins let s = origins
.iter() .iter()
.fold(String::new(), |s, v| s + &v.to_string()); .fold(String::new(), |s, v| format!("{}, {}", s, v));
cors.origins_str = Some(HeaderValue::try_from(s.as_str()).unwrap()); cors.origins_str = Some(HeaderValue::try_from(&s[2..]).unwrap());
} }
if !self.expose_hdrs.is_empty() { if !self.expose_hdrs.is_empty() {
@@ -1122,16 +1122,29 @@ mod tests {
let cors = Cors::build() let cors = Cors::build()
.disable_vary_header() .disable_vary_header()
.allowed_origin("https://www.example.com") .allowed_origin("https://www.example.com")
.allowed_origin("https://www.google.com")
.finish(); .finish();
let resp: HttpResponse = HttpResponse::Ok().into(); let resp: HttpResponse = HttpResponse::Ok().into();
let resp = cors.response(&req, resp).unwrap().response(); let resp = cors.response(&req, resp).unwrap().response();
assert_eq!(
&b"https://www.example.com"[..], let origins_str = resp
resp.headers() .headers()
.get(header::ACCESS_CONTROL_ALLOW_ORIGIN) .get(header::ACCESS_CONTROL_ALLOW_ORIGIN)
.unwrap() .unwrap()
.as_bytes() .to_str()
); .unwrap();
if origins_str.starts_with("https://www.example.com") {
assert_eq!(
"https://www.example.com, https://www.google.com",
origins_str
);
} else {
assert_eq!(
"https://www.google.com, https://www.example.com",
origins_str
);
}
} }
#[test] #[test]

View File

@@ -48,7 +48,7 @@ impl DefaultHeaders {
/// Set a header. /// Set a header.
#[inline] #[inline]
#[cfg_attr(feature = "cargo-clippy", allow(match_wild_err_arm))] #[cfg_attr(feature = "cargo-clippy", allow(clippy::match_wild_err_arm))]
pub fn header<K, V>(mut self, key: K, value: V) -> Self pub fn header<K, V>(mut self, key: K, value: V) -> Self
where where
HeaderName: HttpTryFrom<K>, HeaderName: HttpTryFrom<K>,

View File

@@ -270,14 +270,17 @@ impl<S: 'static, T: SessionBackend<S>> Middleware<S> for SessionStorage<T, S> {
} }
/// A simple key-value storage interface that is internally used by `Session`. /// A simple key-value storage interface that is internally used by `Session`.
#[doc(hidden)]
pub trait SessionImpl: 'static { pub trait SessionImpl: 'static {
/// Get session value by key
fn get(&self, key: &str) -> Option<&str>; fn get(&self, key: &str) -> Option<&str>;
/// Set session value
fn set(&mut self, key: &str, value: String); fn set(&mut self, key: &str, value: String);
/// Remove specific key from session
fn remove(&mut self, key: &str); fn remove(&mut self, key: &str);
/// Remove all values from session
fn clear(&mut self); fn clear(&mut self);
/// Write session to storage backend. /// Write session to storage backend.
@@ -285,9 +288,10 @@ pub trait SessionImpl: 'static {
} }
/// Session's storage backend trait definition. /// Session's storage backend trait definition.
#[doc(hidden)]
pub trait SessionBackend<S>: Sized + 'static { pub trait SessionBackend<S>: Sized + 'static {
/// Session item
type Session: SessionImpl; type Session: SessionImpl;
/// Future that reads session
type ReadFuture: Future<Item = Self::Session, Error = Error>; type ReadFuture: Future<Item = Self::Session, Error = Error>;
/// Parse the session from request and load data from a storage backend. /// Parse the session from request and load data from a storage backend.

View File

@@ -441,13 +441,13 @@ where
impl<S> fmt::Debug for Field<S> { impl<S> fmt::Debug for Field<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let res = writeln!(f, "\nMultipartField: {}", self.ct); writeln!(f, "\nMultipartField: {}", self.ct)?;
let _ = writeln!(f, " boundary: {}", self.inner.borrow().boundary); writeln!(f, " boundary: {}", self.inner.borrow().boundary)?;
let _ = writeln!(f, " headers:"); writeln!(f, " headers:")?;
for (key, val) in self.headers.iter() { for (key, val) in self.headers.iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val); writeln!(f, " {:?}: {:?}", key, val)?;
} }
res Ok(())
} }
} }

View File

@@ -236,7 +236,6 @@ macro_rules! FROM_STR {
($type:ty) => { ($type:ty) => {
impl FromParam for $type { impl FromParam for $type {
type Err = InternalError<<$type as FromStr>::Err>; type Err = InternalError<<$type as FromStr>::Err>;
fn from_param(val: &str) -> Result<Self, Self::Err> { fn from_param(val: &str) -> Result<Self, Self::Err> {
<$type as FromStr>::from_str(val) <$type as FromStr>::from_str(val)
.map_err(|e| InternalError::new(e, StatusCode::BAD_REQUEST)) .map_err(|e| InternalError::new(e, StatusCode::BAD_REQUEST))

View File

@@ -1,6 +1,8 @@
//! Payload stream //! Payload stream
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use futures::task::{current as current_task, Task}; #[cfg(not(test))]
use futures::task::current as current_task;
use futures::task::Task;
use futures::{Async, Poll, Stream}; use futures::{Async, Poll, Stream};
use std::cell::RefCell; use std::cell::RefCell;
use std::cmp; use std::cmp;

View File

@@ -134,8 +134,7 @@ impl<S: 'static> Route<S> {
/// } /// }
/// ``` /// ```
/// ///
/// It is possible to use tuples for specifing multiple extractors for one /// It is possible to use multiple extractors for one handler function.
/// handler function.
/// ///
/// ```rust /// ```rust
/// # extern crate bytes; /// # extern crate bytes;
@@ -152,9 +151,9 @@ impl<S: 'static> Route<S> {
/// ///
/// /// extract path info using serde /// /// extract path info using serde
/// fn index( /// fn index(
/// info: (Path<Info>, Query<HashMap<String, String>>, Json<Info>), /// path: Path<Info>, query: Query<HashMap<String, String>>, body: Json<Info>,
/// ) -> Result<String> { /// ) -> Result<String> {
/// Ok(format!("Welcome {}!", info.0.username)) /// Ok(format!("Welcome {}!", path.username))
/// } /// }
/// ///
/// fn main() { /// fn main() {

View File

@@ -815,73 +815,70 @@ impl ResourceDef {
Ok(()) Ok(())
} }
fn parse( fn parse_param(pattern: &str) -> (PatternElement, String, &str) {
pattern: &str, for_prefix: bool,
) -> (String, Vec<PatternElement>, bool, usize) {
const DEFAULT_PATTERN: &str = "[^/]+"; const DEFAULT_PATTERN: &str = "[^/]+";
let mut params_nesting = 0usize;
let mut re1 = String::from("^"); let close_idx = pattern
let mut re2 = String::new(); .find(|c| match c {
let mut el = String::new(); '{' => {
let mut in_param = false; params_nesting += 1;
let mut in_param_pattern = false; false
let mut param_name = String::new();
let mut param_pattern = String::from(DEFAULT_PATTERN);
let mut is_dynamic = false;
let mut elems = Vec::new();
let mut len = 0;
for ch in pattern.chars() {
if in_param {
// In parameter segment: `{....}`
if ch == '}' {
elems.push(PatternElement::Var(param_name.clone()));
re1.push_str(&format!(r"(?P<{}>{})", &param_name, &param_pattern));
param_name.clear();
param_pattern = String::from(DEFAULT_PATTERN);
len = 0;
in_param_pattern = false;
in_param = false;
} else if ch == ':' {
// The parameter name has been determined; custom pattern land
in_param_pattern = true;
param_pattern.clear();
} else if in_param_pattern {
// Ignore leading whitespace for pattern
if !(ch == ' ' && param_pattern.is_empty()) {
param_pattern.push(ch);
}
} else {
param_name.push(ch);
} }
} else if ch == '{' { '}' => {
in_param = true; params_nesting -= 1;
is_dynamic = true; params_nesting == 0
elems.push(PatternElement::Str(el.clone())); }
el.clear(); _ => false,
} else { }).expect("malformed param");
re1.push_str(escape(&ch.to_string()).as_str()); let (mut param, rem) = pattern.split_at(close_idx + 1);
re2.push(ch); param = &param[1..param.len() - 1]; // Remove outer brackets
el.push(ch); let (name, pattern) = match param.find(':') {
len += 1; Some(idx) => {
let (name, pattern) = param.split_at(idx);
(name, &pattern[1..])
} }
} None => (param, DEFAULT_PATTERN),
if !el.is_empty() {
elems.push(PatternElement::Str(el.clone()));
}
let re = if is_dynamic {
if !for_prefix {
re1.push('$');
}
re1
} else {
re2
}; };
(re, elems, is_dynamic, len) (
PatternElement::Var(name.to_string()),
format!(r"(?P<{}>{})", &name, &pattern),
rem,
)
}
fn parse(
mut pattern: &str, for_prefix: bool,
) -> (String, Vec<PatternElement>, bool, usize) {
if pattern.find('{').is_none() {
return (
String::from(pattern),
vec![PatternElement::Str(String::from(pattern))],
false,
pattern.chars().count(),
);
};
let mut elems = Vec::new();
let mut re = String::from("^");
while let Some(idx) = pattern.find('{') {
let (prefix, rem) = pattern.split_at(idx);
elems.push(PatternElement::Str(String::from(prefix)));
re.push_str(&escape(prefix));
let (param_pattern, re_part, rem) = Self::parse_param(rem);
elems.push(param_pattern);
re.push_str(&re_part);
pattern = rem;
}
elems.push(PatternElement::Str(String::from(pattern)));
re.push_str(&escape(pattern));
if !for_prefix {
re.push_str("$");
}
(re, elems, true, pattern.chars().count())
} }
} }
@@ -1072,6 +1069,16 @@ mod tests {
let info = re.match_with_params(&req, 0).unwrap(); let info = re.match_with_params(&req, 0).unwrap();
assert_eq!(info.get("version").unwrap(), "151"); assert_eq!(info.get("version").unwrap(), "151");
assert_eq!(info.get("id").unwrap(), "adahg32"); assert_eq!(info.get("id").unwrap(), "adahg32");
let re = ResourceDef::new("/{id:[[:digit:]]{6}}");
assert!(re.is_match("/012345"));
assert!(!re.is_match("/012"));
assert!(!re.is_match("/01234567"));
assert!(!re.is_match("/XXXXXX"));
let req = TestRequest::with_uri("/012345").finish();
let info = re.match_with_params(&req, 0).unwrap();
assert_eq!(info.get("id").unwrap(), "012345");
} }
#[test] #[test]

View File

@@ -5,7 +5,10 @@ use std::rc::Rc;
use futures::{Async, Future, Poll}; use futures::{Async, Future, Poll};
use error::Error; use error::Error;
use handler::{AsyncResult, AsyncResultItem, FromRequest, Responder, RouteHandler}; use handler::{
AsyncResult, AsyncResultItem, FromRequest, Handler, Responder, RouteHandler,
WrapHandler,
};
use http::Method; use http::Method;
use httprequest::HttpRequest; use httprequest::HttpRequest;
use httpresponse::HttpResponse; use httpresponse::HttpResponse;
@@ -56,7 +59,10 @@ pub struct Scope<S> {
middlewares: Rc<Vec<Box<Middleware<S>>>>, middlewares: Rc<Vec<Box<Middleware<S>>>>,
} }
#[cfg_attr(feature = "cargo-clippy", allow(new_without_default_derive))] #[cfg_attr(
feature = "cargo-clippy",
allow(clippy::new_without_default_derive)
)]
impl<S: 'static> Scope<S> { impl<S: 'static> Scope<S> {
/// Create a new scope /// Create a new scope
pub fn new(path: &str) -> Scope<S> { pub fn new(path: &str) -> Scope<S> {
@@ -180,7 +186,7 @@ impl<S: 'static> Scope<S> {
where where
F: FnOnce(Scope<S>) -> Scope<S>, F: FnOnce(Scope<S>) -> Scope<S>,
{ {
let rdef = ResourceDef::prefix(&path); let rdef = ResourceDef::prefix(&insert_slash(path));
let scope = Scope { let scope = Scope {
rdef: rdef.clone(), rdef: rdef.clone(),
filters: Vec::new(), filters: Vec::new(),
@@ -227,9 +233,11 @@ impl<S: 'static> Scope<S> {
R: Responder + 'static, R: Responder + 'static,
T: FromRequest<S> + 'static, T: FromRequest<S> + 'static,
{ {
Rc::get_mut(&mut self.router) Rc::get_mut(&mut self.router).unwrap().register_route(
.unwrap() &insert_slash(path),
.register_route(path, method, f); method,
f,
);
self self
} }
@@ -261,7 +269,7 @@ impl<S: 'static> Scope<S> {
F: FnOnce(&mut Resource<S>) -> R + 'static, F: FnOnce(&mut Resource<S>) -> R + 'static,
{ {
// add resource // add resource
let mut resource = Resource::new(ResourceDef::new(path)); let mut resource = Resource::new(ResourceDef::new(&insert_slash(path)));
f(&mut resource); f(&mut resource);
Rc::get_mut(&mut self.router) Rc::get_mut(&mut self.router)
@@ -286,6 +294,35 @@ impl<S: 'static> Scope<S> {
self self
} }
/// Configure handler for specific path prefix.
///
/// A path prefix consists of valid path segments, i.e for the
/// prefix `/app` any request with the paths `/app`, `/app/` or
/// `/app/test` would match, but the path `/application` would
/// not.
///
/// ```rust
/// # extern crate actix_web;
/// use actix_web::{http, App, HttpRequest, HttpResponse};
///
/// fn main() {
/// let app = App::new().scope("/scope-prefix", |scope| {
/// scope.handler("/app", |req: &HttpRequest| match *req.method() {
/// http::Method::GET => HttpResponse::Ok(),
/// http::Method::POST => HttpResponse::MethodNotAllowed(),
/// _ => HttpResponse::NotFound(),
/// })
/// });
/// }
/// ```
pub fn handler<H: Handler<S>>(mut self, path: &str, handler: H) -> Scope<S> {
let path = insert_slash(path.trim().trim_right_matches('/'));
Rc::get_mut(&mut self.router)
.expect("Multiple copies of scope router")
.register_handler(&path, Box::new(WrapHandler::new(handler)), None);
self
}
/// Register a scope middleware /// Register a scope middleware
/// ///
/// This is similar to `App's` middlewares, but /// This is similar to `App's` middlewares, but
@@ -301,6 +338,14 @@ impl<S: 'static> Scope<S> {
} }
} }
fn insert_slash(path: &str) -> String {
let mut path = path.to_owned();
if !path.is_empty() && !path.starts_with('/') {
path.insert(0, '/');
};
path
}
impl<S: 'static> RouteHandler<S> for Scope<S> { impl<S: 'static> RouteHandler<S> for Scope<S> {
fn handle(&self, req: &HttpRequest<S>) -> AsyncResult<HttpResponse> { fn handle(&self, req: &HttpRequest<S>) -> AsyncResult<HttpResponse> {
let tail = req.match_info().tail as usize; let tail = req.match_info().tail as usize;
@@ -779,11 +824,37 @@ mod tests {
scope scope
.route("/path1", Method::GET, |_: HttpRequest<_>| { .route("/path1", Method::GET, |_: HttpRequest<_>| {
HttpResponse::Ok() HttpResponse::Ok()
}).route( }).route("/path1", Method::DELETE, |_: HttpRequest<_>| {
"/path1", HttpResponse::Ok()
Method::DELETE, })
|_: HttpRequest<_>| HttpResponse::Ok(), }).finish();
)
let req = TestRequest::with_uri("/app/path1").request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::OK);
let req = TestRequest::with_uri("/app/path1")
.method(Method::DELETE)
.request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::OK);
let req = TestRequest::with_uri("/app/path1")
.method(Method::POST)
.request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::NOT_FOUND);
}
#[test]
fn test_scope_route_without_leading_slash() {
let app = App::new()
.scope("app", |scope| {
scope
.route("path1", Method::GET, |_: HttpRequest<_>| HttpResponse::Ok())
.route("path1", Method::DELETE, |_: HttpRequest<_>| {
HttpResponse::Ok()
})
}).finish(); }).finish();
let req = TestRequest::with_uri("/app/path1").request(); let req = TestRequest::with_uri("/app/path1").request();
@@ -972,6 +1043,20 @@ mod tests {
assert_eq!(resp.as_msg().status(), StatusCode::CREATED); assert_eq!(resp.as_msg().status(), StatusCode::CREATED);
} }
#[test]
fn test_nested_scope_no_slash() {
let app = App::new()
.scope("/app", |scope| {
scope.nested("t1", |scope| {
scope.resource("/path1", |r| r.f(|_| HttpResponse::Created()))
})
}).finish();
let req = TestRequest::with_uri("/app/t1/path1").request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::CREATED);
}
#[test] #[test]
fn test_nested_scope_root() { fn test_nested_scope_root() {
let app = App::new() let app = App::new()
@@ -1120,4 +1205,32 @@ mod tests {
let resp = app.run(req); let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::METHOD_NOT_ALLOWED); assert_eq!(resp.as_msg().status(), StatusCode::METHOD_NOT_ALLOWED);
} }
#[test]
fn test_handler() {
let app = App::new()
.scope("/scope", |scope| {
scope.handler("/test", |_: &_| HttpResponse::Ok())
}).finish();
let req = TestRequest::with_uri("/scope/test").request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::OK);
let req = TestRequest::with_uri("/scope/test/").request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::OK);
let req = TestRequest::with_uri("/scope/test/app").request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::OK);
let req = TestRequest::with_uri("/scope/testapp").request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri("/scope/blah").request();
let resp = app.run(req);
assert_eq!(resp.as_msg().status(), StatusCode::NOT_FOUND);
}
} }

View File

@@ -1,472 +0,0 @@
use std::sync::mpsc as sync_mpsc;
use std::time::{Duration, Instant};
use std::{io, net, thread};
use futures::{sync::mpsc, Future};
use mio;
use slab::Slab;
use tokio_timer::Delay;
use actix::{msgs::Execute, Arbiter, System};
use super::server::ServerCommand;
use super::worker::{Conn, WorkerClient};
use super::Token;
pub(crate) enum Command {
Pause,
Resume,
Stop,
Worker(WorkerClient),
}
struct ServerSocketInfo {
addr: net::SocketAddr,
token: Token,
handler: Token,
sock: mio::net::TcpListener,
timeout: Option<Instant>,
}
#[derive(Clone)]
pub(crate) struct AcceptNotify(mio::SetReadiness);
impl AcceptNotify {
pub(crate) fn new(ready: mio::SetReadiness) -> Self {
AcceptNotify(ready)
}
pub(crate) fn notify(&self) {
let _ = self.0.set_readiness(mio::Ready::readable());
}
}
impl Default for AcceptNotify {
fn default() -> Self {
AcceptNotify::new(mio::Registration::new2().1)
}
}
pub(crate) struct AcceptLoop {
cmd_reg: Option<mio::Registration>,
cmd_ready: mio::SetReadiness,
notify_reg: Option<mio::Registration>,
notify_ready: mio::SetReadiness,
tx: sync_mpsc::Sender<Command>,
rx: Option<sync_mpsc::Receiver<Command>>,
srv: Option<(
mpsc::UnboundedSender<ServerCommand>,
mpsc::UnboundedReceiver<ServerCommand>,
)>,
}
impl AcceptLoop {
pub fn new() -> AcceptLoop {
let (tx, rx) = sync_mpsc::channel();
let (cmd_reg, cmd_ready) = mio::Registration::new2();
let (notify_reg, notify_ready) = mio::Registration::new2();
AcceptLoop {
tx,
cmd_ready,
cmd_reg: Some(cmd_reg),
notify_ready,
notify_reg: Some(notify_reg),
rx: Some(rx),
srv: Some(mpsc::unbounded()),
}
}
pub fn send(&self, msg: Command) {
let _ = self.tx.send(msg);
let _ = self.cmd_ready.set_readiness(mio::Ready::readable());
}
pub fn get_notify(&self) -> AcceptNotify {
AcceptNotify::new(self.notify_ready.clone())
}
pub(crate) fn start(
&mut self, socks: Vec<Vec<(Token, net::TcpListener)>>,
workers: Vec<WorkerClient>,
) -> mpsc::UnboundedReceiver<ServerCommand> {
let (tx, rx) = self.srv.take().expect("Can not re-use AcceptInfo");
Accept::start(
self.rx.take().expect("Can not re-use AcceptInfo"),
self.cmd_reg.take().expect("Can not re-use AcceptInfo"),
self.notify_reg.take().expect("Can not re-use AcceptInfo"),
socks,
tx,
workers,
);
rx
}
}
struct Accept {
poll: mio::Poll,
rx: sync_mpsc::Receiver<Command>,
sockets: Slab<ServerSocketInfo>,
workers: Vec<WorkerClient>,
srv: mpsc::UnboundedSender<ServerCommand>,
timer: (mio::Registration, mio::SetReadiness),
next: usize,
backpressure: bool,
}
const DELTA: usize = 100;
const CMD: mio::Token = mio::Token(0);
const TIMER: mio::Token = mio::Token(1);
const NOTIFY: mio::Token = mio::Token(2);
/// This function defines errors that are per-connection. Which basically
/// means that if we get this error from `accept()` system call it means
/// next connection might be ready to be accepted.
///
/// All other errors will incur a timeout before next `accept()` is performed.
/// The timeout is useful to handle resource exhaustion errors like ENFILE
/// and EMFILE. Otherwise, could enter into tight loop.
fn connection_error(e: &io::Error) -> bool {
e.kind() == io::ErrorKind::ConnectionRefused
|| e.kind() == io::ErrorKind::ConnectionAborted
|| e.kind() == io::ErrorKind::ConnectionReset
}
impl Accept {
#![cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
pub(crate) fn start(
rx: sync_mpsc::Receiver<Command>, cmd_reg: mio::Registration,
notify_reg: mio::Registration, socks: Vec<Vec<(Token, net::TcpListener)>>,
srv: mpsc::UnboundedSender<ServerCommand>, workers: Vec<WorkerClient>,
) {
let sys = System::current();
// start accept thread
let _ = thread::Builder::new()
.name("actix-web accept loop".to_owned())
.spawn(move || {
System::set_current(sys);
let mut accept = Accept::new(rx, socks, workers, srv);
// Start listening for incoming commands
if let Err(err) = accept.poll.register(
&cmd_reg,
CMD,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
// Start listening for notify updates
if let Err(err) = accept.poll.register(
&notify_reg,
NOTIFY,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
accept.poll();
});
}
fn new(
rx: sync_mpsc::Receiver<Command>, socks: Vec<Vec<(Token, net::TcpListener)>>,
workers: Vec<WorkerClient>, srv: mpsc::UnboundedSender<ServerCommand>,
) -> Accept {
// Create a poll instance
let poll = match mio::Poll::new() {
Ok(poll) => poll,
Err(err) => panic!("Can not create mio::Poll: {}", err),
};
// Start accept
let mut sockets = Slab::new();
for (idx, srv_socks) in socks.into_iter().enumerate() {
for (hnd_token, lst) in srv_socks {
let addr = lst.local_addr().unwrap();
let server = mio::net::TcpListener::from_std(lst)
.expect("Can not create mio::net::TcpListener");
let entry = sockets.vacant_entry();
let token = entry.key();
// Start listening for incoming connections
if let Err(err) = poll.register(
&server,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register io: {}", err);
}
entry.insert(ServerSocketInfo {
addr,
token: hnd_token,
handler: Token(idx),
sock: server,
timeout: None,
});
}
}
// Timer
let (tm, tmr) = mio::Registration::new2();
if let Err(err) =
poll.register(&tm, TIMER, mio::Ready::readable(), mio::PollOpt::edge())
{
panic!("Can not register Registration: {}", err);
}
Accept {
poll,
rx,
sockets,
workers,
srv,
next: 0,
timer: (tm, tmr),
backpressure: false,
}
}
fn poll(&mut self) {
// Create storage for events
let mut events = mio::Events::with_capacity(128);
loop {
if let Err(err) = self.poll.poll(&mut events, None) {
panic!("Poll error: {}", err);
}
for event in events.iter() {
let token = event.token();
match token {
CMD => if !self.process_cmd() {
return;
},
TIMER => self.process_timer(),
NOTIFY => self.backpressure(false),
_ => {
let token = usize::from(token);
if token < DELTA {
continue;
}
self.accept(token - DELTA);
}
}
}
}
}
fn process_timer(&mut self) {
let now = Instant::now();
for (token, info) in self.sockets.iter_mut() {
if let Some(inst) = info.timeout.take() {
if now > inst {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not register server socket {}", err);
} else {
info!("Resume accepting connections on {}", info.addr);
}
} else {
info.timeout = Some(inst);
}
}
}
}
fn process_cmd(&mut self) -> bool {
loop {
match self.rx.try_recv() {
Ok(cmd) => match cmd {
Command::Pause => {
for (_, info) in self.sockets.iter_mut() {
if let Err(err) = self.poll.deregister(&info.sock) {
error!("Can not deregister server socket {}", err);
} else {
info!("Paused accepting connections on {}", info.addr);
}
}
}
Command::Resume => {
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!(
"Accepting connections on {} has been resumed",
info.addr
);
}
}
}
Command::Stop => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
Command::Worker(worker) => {
self.backpressure(false);
self.workers.push(worker);
}
},
Err(err) => match err {
sync_mpsc::TryRecvError::Empty => break,
sync_mpsc::TryRecvError::Disconnected => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
},
}
}
true
}
fn backpressure(&mut self, on: bool) {
if self.backpressure {
if !on {
self.backpressure = false;
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!("Accepting connections on {} has been resumed", info.addr);
}
}
}
} else if on {
self.backpressure = true;
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
}
}
fn accept_one(&mut self, mut msg: Conn<net::TcpStream>) {
if self.backpressure {
while !self.workers.is_empty() {
match self.workers[self.next].send(msg) {
Ok(_) => (),
Err(err) => {
let _ = self.srv.unbounded_send(ServerCommand::WorkerDied(
self.workers[self.next].idx,
));
msg = err.into_inner();
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
error!("No workers");
return;
} else if self.workers.len() <= self.next {
self.next = 0;
}
continue;
}
}
self.next = (self.next + 1) % self.workers.len();
break;
}
} else {
let mut idx = 0;
while idx < self.workers.len() {
idx += 1;
if self.workers[self.next].available() {
match self.workers[self.next].send(msg) {
Ok(_) => {
self.next = (self.next + 1) % self.workers.len();
return;
}
Err(err) => {
let _ = self.srv.unbounded_send(ServerCommand::WorkerDied(
self.workers[self.next].idx,
));
msg = err.into_inner();
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
error!("No workers");
self.backpressure(true);
return;
} else if self.workers.len() <= self.next {
self.next = 0;
}
continue;
}
}
}
self.next = (self.next + 1) % self.workers.len();
}
// enable backpressure
self.backpressure(true);
self.accept_one(msg);
}
}
fn accept(&mut self, token: usize) {
loop {
let msg = if let Some(info) = self.sockets.get_mut(token) {
match info.sock.accept_std() {
Ok((io, addr)) => Conn {
io,
token: info.token,
handler: info.handler,
peer: Some(addr),
},
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
error!("Error accepting connection: {}", e);
if let Err(err) = self.poll.deregister(&info.sock) {
error!("Can not deregister server socket {}", err);
}
// sleep after error
info.timeout = Some(Instant::now() + Duration::from_millis(500));
let r = self.timer.1.clone();
System::current().arbiter().do_send(Execute::new(
move || -> Result<(), ()> {
Arbiter::spawn(
Delay::new(
Instant::now() + Duration::from_millis(510),
).map_err(|_| ())
.and_then(move |_| {
let _ = r.set_readiness(mio::Ready::readable());
Ok(())
}),
);
Ok(())
},
));
return;
}
}
} else {
return;
};
self.accept_one(msg);
}
}
}

396
src/server/acceptor.rs Normal file
View File

@@ -0,0 +1,396 @@
use std::time::Duration;
use std::{fmt, net};
use actix_net::server::ServerMessage;
use actix_net::service::{NewService, Service};
use futures::future::{err, ok, Either, FutureResult};
use futures::{Async, Future, Poll};
use tokio_reactor::Handle;
use tokio_tcp::TcpStream;
use tokio_timer::{sleep, Delay};
// use super::channel::HttpProtocol;
use super::error::AcceptorError;
use super::handler::HttpHandler;
use super::settings::ServiceConfig;
use super::IoStream;
/// This trait indicates types that can create acceptor service for http server.
pub trait AcceptorServiceFactory: Send + Clone + 'static {
type Io: IoStream + Send;
type NewService: NewService<Request = TcpStream, Response = Self::Io>;
fn create(&self) -> Self::NewService;
}
impl<F, T> AcceptorServiceFactory for F
where
F: Fn() -> T + Send + Clone + 'static,
T::Response: IoStream + Send,
T: NewService<Request = TcpStream>,
T::InitError: fmt::Debug,
{
type Io = T::Response;
type NewService = T;
fn create(&self) -> T {
(self)()
}
}
#[derive(Clone)]
/// Default acceptor service convert `TcpStream` to a `tokio_tcp::TcpStream`
pub(crate) struct DefaultAcceptor;
impl AcceptorServiceFactory for DefaultAcceptor {
type Io = TcpStream;
type NewService = DefaultAcceptor;
fn create(&self) -> Self::NewService {
DefaultAcceptor
}
}
impl NewService for DefaultAcceptor {
type Request = TcpStream;
type Response = TcpStream;
type Error = ();
type InitError = ();
type Service = DefaultAcceptor;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
ok(DefaultAcceptor)
}
}
impl Service for DefaultAcceptor {
type Request = TcpStream;
type Response = TcpStream;
type Error = ();
type Future = FutureResult<Self::Response, Self::Error>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: Self::Request) -> Self::Future {
ok(req)
}
}
pub(crate) struct TcpAcceptor<T> {
inner: T,
}
impl<T, E> TcpAcceptor<T>
where
T: NewService<Request = TcpStream, Error = AcceptorError<E>>,
T::InitError: fmt::Debug,
{
pub(crate) fn new(inner: T) -> Self {
TcpAcceptor { inner }
}
}
impl<T, E> NewService for TcpAcceptor<T>
where
T: NewService<Request = TcpStream, Error = AcceptorError<E>>,
T::InitError: fmt::Debug,
{
type Request = net::TcpStream;
type Response = T::Response;
type Error = AcceptorError<E>;
type InitError = T::InitError;
type Service = TcpAcceptorService<T::Service>;
type Future = TcpAcceptorResponse<T>;
fn new_service(&self) -> Self::Future {
TcpAcceptorResponse {
fut: self.inner.new_service(),
}
}
}
pub(crate) struct TcpAcceptorResponse<T>
where
T: NewService<Request = TcpStream>,
T::InitError: fmt::Debug,
{
fut: T::Future,
}
impl<T> Future for TcpAcceptorResponse<T>
where
T: NewService<Request = TcpStream>,
T::InitError: fmt::Debug,
{
type Item = TcpAcceptorService<T::Service>;
type Error = T::InitError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll() {
Ok(Async::NotReady) => Ok(Async::NotReady),
Ok(Async::Ready(service)) => {
Ok(Async::Ready(TcpAcceptorService { inner: service }))
}
Err(e) => {
error!("Can not create accetor service: {:?}", e);
Err(e)
}
}
}
}
pub(crate) struct TcpAcceptorService<T> {
inner: T,
}
impl<T, E> Service for TcpAcceptorService<T>
where
T: Service<Request = TcpStream, Error = AcceptorError<E>>,
{
type Request = net::TcpStream;
type Response = T::Response;
type Error = AcceptorError<E>;
type Future = Either<T::Future, FutureResult<Self::Response, Self::Error>>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.inner.poll_ready()
}
fn call(&mut self, req: Self::Request) -> Self::Future {
let stream = TcpStream::from_std(req, &Handle::default()).map_err(|e| {
error!("Can not convert to an async tcp stream: {}", e);
AcceptorError::Io(e)
});
match stream {
Ok(stream) => Either::A(self.inner.call(stream)),
Err(e) => Either::B(err(e)),
}
}
}
#[doc(hidden)]
/// Acceptor timeout middleware
///
/// Applies timeout to request prcoessing.
pub struct AcceptorTimeout<T> {
inner: T,
timeout: Duration,
}
impl<T: NewService> AcceptorTimeout<T> {
/// Create new `AcceptorTimeout` instance. timeout is in milliseconds.
pub fn new(timeout: u64, inner: T) -> Self {
Self {
inner,
timeout: Duration::from_millis(timeout),
}
}
}
impl<T: NewService> NewService for AcceptorTimeout<T> {
type Request = T::Request;
type Response = T::Response;
type Error = AcceptorError<T::Error>;
type InitError = T::InitError;
type Service = AcceptorTimeoutService<T::Service>;
type Future = AcceptorTimeoutFut<T>;
fn new_service(&self) -> Self::Future {
AcceptorTimeoutFut {
fut: self.inner.new_service(),
timeout: self.timeout,
}
}
}
#[doc(hidden)]
pub struct AcceptorTimeoutFut<T: NewService> {
fut: T::Future,
timeout: Duration,
}
impl<T: NewService> Future for AcceptorTimeoutFut<T> {
type Item = AcceptorTimeoutService<T::Service>;
type Error = T::InitError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let inner = try_ready!(self.fut.poll());
Ok(Async::Ready(AcceptorTimeoutService {
inner,
timeout: self.timeout,
}))
}
}
#[doc(hidden)]
/// Acceptor timeout service
///
/// Applies timeout to request prcoessing.
pub struct AcceptorTimeoutService<T> {
inner: T,
timeout: Duration,
}
impl<T: Service> Service for AcceptorTimeoutService<T> {
type Request = T::Request;
type Response = T::Response;
type Error = AcceptorError<T::Error>;
type Future = AcceptorTimeoutResponse<T>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.inner.poll_ready().map_err(AcceptorError::Service)
}
fn call(&mut self, req: Self::Request) -> Self::Future {
AcceptorTimeoutResponse {
fut: self.inner.call(req),
sleep: sleep(self.timeout),
}
}
}
#[doc(hidden)]
pub struct AcceptorTimeoutResponse<T: Service> {
fut: T::Future,
sleep: Delay,
}
impl<T: Service> Future for AcceptorTimeoutResponse<T> {
type Item = T::Response;
type Error = AcceptorError<T::Error>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll().map_err(AcceptorError::Service)? {
Async::NotReady => match self.sleep.poll() {
Err(_) => Err(AcceptorError::Timeout),
Ok(Async::Ready(_)) => Err(AcceptorError::Timeout),
Ok(Async::NotReady) => Ok(Async::NotReady),
},
Async::Ready(resp) => Ok(Async::Ready(resp)),
}
}
}
pub(crate) struct ServerMessageAcceptor<T, H: HttpHandler> {
inner: T,
settings: ServiceConfig<H>,
}
impl<T, H> ServerMessageAcceptor<T, H>
where
H: HttpHandler,
T: NewService<Request = net::TcpStream>,
{
pub(crate) fn new(settings: ServiceConfig<H>, inner: T) -> Self {
ServerMessageAcceptor { inner, settings }
}
}
impl<T, H> NewService for ServerMessageAcceptor<T, H>
where
H: HttpHandler,
T: NewService<Request = net::TcpStream>,
{
type Request = ServerMessage;
type Response = ();
type Error = T::Error;
type InitError = T::InitError;
type Service = ServerMessageAcceptorService<T::Service, H>;
type Future = ServerMessageAcceptorResponse<T, H>;
fn new_service(&self) -> Self::Future {
ServerMessageAcceptorResponse {
fut: self.inner.new_service(),
settings: self.settings.clone(),
}
}
}
pub(crate) struct ServerMessageAcceptorResponse<T, H>
where
H: HttpHandler,
T: NewService<Request = net::TcpStream>,
{
fut: T::Future,
settings: ServiceConfig<H>,
}
impl<T, H> Future for ServerMessageAcceptorResponse<T, H>
where
H: HttpHandler,
T: NewService<Request = net::TcpStream>,
{
type Item = ServerMessageAcceptorService<T::Service, H>;
type Error = T::InitError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll()? {
Async::NotReady => Ok(Async::NotReady),
Async::Ready(service) => Ok(Async::Ready(ServerMessageAcceptorService {
inner: service,
settings: self.settings.clone(),
})),
}
}
}
pub(crate) struct ServerMessageAcceptorService<T, H: HttpHandler> {
inner: T,
settings: ServiceConfig<H>,
}
impl<T, H> Service for ServerMessageAcceptorService<T, H>
where
H: HttpHandler,
T: Service<Request = net::TcpStream>,
{
type Request = ServerMessage;
type Response = ();
type Error = T::Error;
type Future =
Either<ServerMessageAcceptorServiceFut<T>, FutureResult<(), Self::Error>>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.inner.poll_ready()
}
fn call(&mut self, req: Self::Request) -> Self::Future {
match req {
ServerMessage::Connect(stream) => {
Either::A(ServerMessageAcceptorServiceFut {
fut: self.inner.call(stream),
})
}
ServerMessage::Shutdown(_) => Either::B(ok(())),
ServerMessage::ForceShutdown => {
// self.settings
// .head()
// .traverse(|proto: &mut HttpProtocol<TcpStream, H>| proto.shutdown());
Either::B(ok(()))
}
}
}
}
pub(crate) struct ServerMessageAcceptorServiceFut<T: Service> {
fut: T::Future,
}
impl<T> Future for ServerMessageAcceptorServiceFut<T>
where
T: Service,
{
type Item = ();
type Error = T::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll()? {
Async::NotReady => Ok(Async::NotReady),
Async::Ready(_) => Ok(Async::Ready(())),
}
}
}

117
src/server/builder.rs Normal file
View File

@@ -0,0 +1,117 @@
use std::{fmt, net};
use actix_net::either::Either;
use actix_net::server::{Server, ServiceFactory};
use actix_net::service::{NewService, NewServiceExt};
use super::acceptor::{
AcceptorServiceFactory, AcceptorTimeout, ServerMessageAcceptor, TcpAcceptor,
};
use super::error::AcceptorError;
use super::handler::IntoHttpHandler;
use super::service::HttpService;
use super::settings::{ServerSettings, ServiceConfig};
use super::KeepAlive;
pub(crate) trait ServiceProvider {
fn register(
&self, server: Server, lst: net::TcpListener, host: String,
addr: net::SocketAddr, keep_alive: KeepAlive, secure: bool, client_timeout: u64,
client_shutdown: u64,
) -> Server;
}
/// Utility type that builds complete http pipeline
pub(crate) struct HttpServiceBuilder<F, H, A>
where
F: Fn() -> H + Send + Clone,
{
factory: F,
acceptor: A,
}
impl<F, H, A> HttpServiceBuilder<F, H, A>
where
F: Fn() -> H + Send + Clone + 'static,
H: IntoHttpHandler,
A: AcceptorServiceFactory,
<A::NewService as NewService>::InitError: fmt::Debug,
{
/// Create http service builder
pub fn new(factory: F, acceptor: A) -> Self {
Self { factory, acceptor }
}
fn finish(
&self, host: String, addr: net::SocketAddr, keep_alive: KeepAlive, secure: bool,
client_timeout: u64, client_shutdown: u64,
) -> impl ServiceFactory {
let factory = self.factory.clone();
let acceptor = self.acceptor.clone();
move || {
let app = (factory)().into_handler();
let settings = ServiceConfig::new(
app,
keep_alive,
client_timeout,
client_shutdown,
ServerSettings::new(addr, &host, false),
);
if secure {
Either::B(ServerMessageAcceptor::new(
settings.clone(),
TcpAcceptor::new(AcceptorTimeout::new(
client_timeout,
acceptor.create(),
)).map_err(|_| ())
.map_init_err(|_| ())
.and_then(
HttpService::new(settings)
.map_init_err(|_| ())
.map_err(|_| ()),
),
))
} else {
Either::A(ServerMessageAcceptor::new(
settings.clone(),
TcpAcceptor::new(acceptor.create().map_err(AcceptorError::Service))
.map_err(|_| ())
.map_init_err(|_| ())
.and_then(
HttpService::new(settings)
.map_init_err(|_| ())
.map_err(|_| ()),
),
))
}
}
}
}
impl<F, H, A> ServiceProvider for HttpServiceBuilder<F, H, A>
where
F: Fn() -> H + Send + Clone + 'static,
A: AcceptorServiceFactory,
<A::NewService as NewService>::InitError: fmt::Debug,
H: IntoHttpHandler,
{
fn register(
&self, server: Server, lst: net::TcpListener, host: String,
addr: net::SocketAddr, keep_alive: KeepAlive, secure: bool, client_timeout: u64,
client_shutdown: u64,
) -> Server {
server.listen2(
"actix-web",
lst,
self.finish(
host,
addr,
keep_alive,
secure,
client_timeout,
client_shutdown,
),
)
}
}

View File

@@ -1,22 +1,43 @@
use std::net::{Shutdown, SocketAddr}; use std::net::Shutdown;
use std::rc::Rc; use std::{io, mem, time};
use std::{io, ptr, time};
use bytes::{Buf, BufMut, BytesMut}; use bytes::{Buf, BufMut, BytesMut};
use futures::{Async, Future, Poll}; use futures::{Async, Future, Poll};
use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::{AsyncRead, AsyncWrite};
use tokio_timer::Delay;
use super::settings::WorkerSettings; use super::error::HttpDispatchError;
use super::{h1, h2, ConnectionTag, HttpHandler, IoStream}; use super::settings::ServiceConfig;
use super::{h1, h2, HttpHandler, IoStream};
use http::StatusCode;
const HTTP2_PREFACE: [u8; 14] = *b"PRI * HTTP/2.0"; const HTTP2_PREFACE: [u8; 14] = *b"PRI * HTTP/2.0";
enum HttpProtocol<T: IoStream, H: HttpHandler + 'static> { pub(crate) enum HttpProtocol<T: IoStream, H: HttpHandler + 'static> {
H1(h1::Http1<T, H>), H1(h1::Http1Dispatcher<T, H>),
H2(h2::Http2<T, H>), H2(h2::Http2<T, H>),
Unknown(Rc<WorkerSettings<H>>, Option<SocketAddr>, T, BytesMut), Unknown(ServiceConfig<H>, T, BytesMut),
None,
} }
// impl<T: IoStream, H: HttpHandler + 'static> HttpProtocol<T, H> {
// fn shutdown_(&mut self) {
// match self {
// HttpProtocol::H1(ref mut h1) => {
// let io = h1.io();
// let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0)));
// let _ = IoStream::shutdown(io, Shutdown::Both);
// }
// HttpProtocol::H2(ref mut h2) => h2.shutdown(),
// HttpProtocol::Unknown(_, io, _) => {
// let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0)));
// let _ = IoStream::shutdown(io, Shutdown::Both);
// }
// HttpProtocol::None => (),
// }
// }
// }
enum ProtocolKind { enum ProtocolKind {
Http1, Http1,
Http2, Http2,
@@ -28,9 +49,9 @@ where
T: IoStream, T: IoStream,
H: HttpHandler + 'static, H: HttpHandler + 'static,
{ {
proto: Option<HttpProtocol<T, H>>, proto: HttpProtocol<T, H>,
node: Option<Node<HttpChannel<T, H>>>, node: Option<Node<()>>,
_tag: ConnectionTag, ka_timeout: Option<Delay>,
} }
impl<T, H> HttpChannel<T, H> impl<T, H> HttpChannel<T, H>
@@ -38,32 +59,25 @@ where
T: IoStream, T: IoStream,
H: HttpHandler + 'static, H: HttpHandler + 'static,
{ {
pub(crate) fn new( pub(crate) fn new(settings: ServiceConfig<H>, io: T) -> HttpChannel<T, H> {
settings: Rc<WorkerSettings<H>>, io: T, peer: Option<SocketAddr>, let ka_timeout = settings.client_timer();
) -> HttpChannel<T, H> {
let _tag = settings.connection();
HttpChannel { HttpChannel {
_tag, ka_timeout,
node: None, node: None,
proto: Some(HttpProtocol::Unknown( proto: HttpProtocol::Unknown(settings, io, BytesMut::with_capacity(8192)),
settings,
peer,
io,
BytesMut::with_capacity(8192),
)),
} }
} }
}
fn shutdown(&mut self) { impl<T, H> Drop for HttpChannel<T, H>
match self.proto { where
Some(HttpProtocol::H1(ref mut h1)) => { T: IoStream,
let io = h1.io(); H: HttpHandler + 'static,
let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0))); {
let _ = IoStream::shutdown(io, Shutdown::Both); fn drop(&mut self) {
} if let Some(mut node) = self.node.take() {
Some(HttpProtocol::H2(ref mut h2)) => h2.shutdown(), node.remove()
_ => (),
} }
} }
} }
@@ -74,62 +88,74 @@ where
H: HttpHandler + 'static, H: HttpHandler + 'static,
{ {
type Item = (); type Item = ();
type Error = (); type Error = HttpDispatchError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> { fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.node.is_some() { // keep-alive timer
let el = self as *mut _; if self.ka_timeout.is_some() {
self.node = Some(Node::new(el)); match self.ka_timeout.as_mut().unwrap().poll() {
Ok(Async::Ready(_)) => {
trace!("Slow request timed out, close connection");
let proto = mem::replace(&mut self.proto, HttpProtocol::None);
if let HttpProtocol::Unknown(settings, io, buf) = proto {
self.proto = HttpProtocol::H1(h1::Http1Dispatcher::for_error(
settings,
io,
StatusCode::REQUEST_TIMEOUT,
self.ka_timeout.take(),
buf,
));
return self.poll();
}
return Ok(Async::Ready(()));
}
Ok(Async::NotReady) => (),
Err(_) => panic!("Something is really wrong"),
}
}
if self.node.is_none() {
self.node = Some(Node::new(()));
let _ = match self.proto { let _ = match self.proto {
Some(HttpProtocol::H1(ref mut h1)) => { HttpProtocol::H1(ref mut h1) => {
self.node.as_mut().map(|n| h1.settings().head().insert(n)) self.node.as_mut().map(|n| h1.settings().head().insert(n))
} }
Some(HttpProtocol::H2(ref mut h2)) => { HttpProtocol::H2(ref mut h2) => {
self.node.as_mut().map(|n| h2.settings().head().insert(n)) self.node.as_mut().map(|n| h2.settings().head().insert(n))
} }
Some(HttpProtocol::Unknown(ref mut settings, _, _, _)) => { HttpProtocol::Unknown(ref mut settings, _, _) => {
self.node.as_mut().map(|n| settings.head().insert(n)) self.node.as_mut().map(|n| settings.head().insert(n))
} }
None => unreachable!(), HttpProtocol::None => unreachable!(),
}; };
} }
let mut is_eof = false;
let kind = match self.proto { let kind = match self.proto {
Some(HttpProtocol::H1(ref mut h1)) => { HttpProtocol::H1(ref mut h1) => return h1.poll(),
let result = h1.poll(); HttpProtocol::H2(ref mut h2) => return h2.poll(),
match result { HttpProtocol::Unknown(_, ref mut io, ref mut buf) => {
Ok(Async::Ready(())) | Err(_) => { let mut err = None;
if let Some(n) = self.node.as_mut() { let mut disconnect = false;
n.remove()
};
}
_ => (),
}
return result;
}
Some(HttpProtocol::H2(ref mut h2)) => {
let result = h2.poll();
match result {
Ok(Async::Ready(())) | Err(_) => {
if let Some(n) = self.node.as_mut() {
n.remove()
};
}
_ => (),
}
return result;
}
Some(HttpProtocol::Unknown(_, _, ref mut io, ref mut buf)) => {
match io.read_available(buf) { match io.read_available(buf) {
Ok(Async::Ready(true)) | Err(_) => { Ok(Async::Ready((read_some, stream_closed))) => {
debug!("Ignored premature client disconnection"); is_eof = stream_closed;
if let Some(n) = self.node.as_mut() { // Only disconnect if no data was read.
n.remove() if is_eof && !read_some {
}; disconnect = true;
return Err(()); }
}
Err(e) => {
err = Some(e.into());
} }
_ => (), _ => (),
} }
if disconnect {
debug!("Ignored premature client disconnection");
return Ok(Async::Ready(()));
} else if let Some(e) = err {
return Err(e);
}
if buf.len() >= 14 { if buf.len() >= 14 {
if buf[..14] == HTTP2_PREFACE[..] { if buf[..14] == HTTP2_PREFACE[..] {
@@ -141,24 +167,30 @@ where
return Ok(Async::NotReady); return Ok(Async::NotReady);
} }
} }
None => unreachable!(), HttpProtocol::None => unreachable!(),
}; };
// upgrade to specific http protocol // upgrade to specific http protocol
if let Some(HttpProtocol::Unknown(settings, addr, io, buf)) = self.proto.take() { let proto = mem::replace(&mut self.proto, HttpProtocol::None);
if let HttpProtocol::Unknown(settings, io, buf) = proto {
match kind { match kind {
ProtocolKind::Http1 => { ProtocolKind::Http1 => {
self.proto = self.proto = HttpProtocol::H1(h1::Http1Dispatcher::new(
Some(HttpProtocol::H1(h1::Http1::new(settings, io, addr, buf))); settings,
io,
buf,
is_eof,
self.ka_timeout.take(),
));
return self.poll(); return self.poll();
} }
ProtocolKind::Http2 => { ProtocolKind::Http2 => {
self.proto = Some(HttpProtocol::H2(h2::Http2::new( self.proto = HttpProtocol::H2(h2::Http2::new(
settings, settings,
io, io,
addr,
buf.freeze(), buf.freeze(),
))); self.ka_timeout.take(),
));
return self.poll(); return self.poll();
} }
} }
@@ -167,46 +199,120 @@ where
} }
} }
#[doc(hidden)]
pub struct H1Channel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
proto: HttpProtocol<T, H>,
node: Option<Node<()>>,
}
impl<T, H> H1Channel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
pub(crate) fn new(settings: ServiceConfig<H>, io: T) -> H1Channel<T, H> {
H1Channel {
node: None,
proto: HttpProtocol::H1(h1::Http1Dispatcher::new(
settings,
io,
BytesMut::with_capacity(8192),
false,
None,
)),
}
}
}
impl<T, H> Drop for H1Channel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
fn drop(&mut self) {
if let Some(mut node) = self.node.take() {
node.remove();
}
}
}
impl<T, H> Future for H1Channel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
type Item = ();
type Error = HttpDispatchError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.node.is_none() {
self.node = Some(Node::new(()));
match self.proto {
HttpProtocol::H1(ref mut h1) => {
self.node.as_mut().map(|n| h1.settings().head().insert(n));
}
_ => unreachable!(),
};
}
match self.proto {
HttpProtocol::H1(ref mut h1) => h1.poll(),
_ => unreachable!(),
}
}
}
pub(crate) struct Node<T> { pub(crate) struct Node<T> {
next: Option<*mut Node<T>>, next: Option<*mut Node<T>>,
prev: Option<*mut Node<T>>, prev: Option<*mut Node<T>>,
element: *mut T, element: T,
} }
impl<T> Node<T> { impl<T> Node<T> {
fn new(el: *mut T) -> Self { fn new(element: T) -> Self {
Node { Node {
element,
next: None, next: None,
prev: None, prev: None,
element: el,
} }
} }
fn insert<I>(&mut self, next: &mut Node<I>) { fn insert<I>(&mut self, next_el: &mut Node<I>) {
unsafe { let next: *mut Node<T> = next_el as *const _ as *mut _;
let next: *mut Node<T> = next as *const _ as *mut _;
if let Some(ref mut next2) = self.next { if let Some(next2) = self.next {
unsafe {
let n = next2.as_mut().unwrap(); let n = next2.as_mut().unwrap();
n.prev = Some(next); n.prev = Some(next);
} }
self.next = Some(next); next_el.next = Some(next2 as *mut _);
}
self.next = Some(next);
unsafe {
let next: &mut Node<T> = &mut *next; let next: &mut Node<T> = &mut *next;
next.prev = Some(self as *mut _); next.prev = Some(self as *mut _);
} }
} }
fn remove(&mut self) { fn remove(&mut self) {
unsafe { let next = self.next.take();
self.element = ptr::null_mut(); let prev = self.prev.take();
let next = self.next.take();
let mut prev = self.prev.take();
if let Some(ref mut prev) = prev { if let Some(prev) = prev {
unsafe {
prev.as_mut().unwrap().next = next; prev.as_mut().unwrap().next = next;
} }
} }
if let Some(next) = next {
unsafe {
next.as_mut().unwrap().prev = prev;
}
}
} }
} }
@@ -215,30 +321,28 @@ impl Node<()> {
Node { Node {
next: None, next: None,
prev: None, prev: None,
element: ptr::null_mut(), element: (),
} }
} }
pub(crate) fn traverse<T, H>(&self) pub(crate) fn traverse<T, H, F: Fn(&mut HttpProtocol<T, H>)>(&self, f: F)
where where
T: IoStream, T: IoStream,
H: HttpHandler + 'static, H: HttpHandler + 'static,
{ {
let mut next = self.next.as_ref(); if let Some(n) = self.next.as_ref() {
loop { unsafe {
if let Some(n) = next { let mut next: &mut Node<HttpProtocol<T, H>> =
unsafe { &mut *(n.as_ref().unwrap() as *const _ as *mut _);
let n: &Node<()> = &*(n.as_ref().unwrap() as *const _); loop {
next = n.next.as_ref(); f(&mut next.element);
if !n.element.is_null() { next = if let Some(n) = next.next.as_ref() {
let ch: &mut HttpChannel<T, H> = &mut **n
&mut *(&mut *(n.element as *mut _) as *mut () as *mut _); } else {
ch.shutdown(); return;
} }
} }
} else {
return;
} }
} }
} }
@@ -277,6 +381,10 @@ where
fn set_linger(&mut self, _: Option<time::Duration>) -> io::Result<()> { fn set_linger(&mut self, _: Option<time::Duration>) -> io::Result<()> {
Ok(()) Ok(())
} }
#[inline]
fn set_keepalive(&mut self, _: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
} }
impl<T> io::Read for WrapperStream<T> impl<T> io::Read for WrapperStream<T>

View File

@@ -1,9 +1,84 @@
use std::io;
use futures::{Async, Poll}; use futures::{Async, Poll};
use http2;
use super::{helpers, HttpHandlerTask, Writer}; use super::{helpers, HttpHandlerTask, Writer};
use http::{StatusCode, Version}; use http::{StatusCode, Version};
use Error; use Error;
/// Errors produced by `AcceptorError` service.
#[derive(Debug)]
pub enum AcceptorError<T> {
/// The inner service error
Service(T),
/// Io specific error
Io(io::Error),
/// The request did not complete within the specified timeout.
Timeout,
}
#[derive(Fail, Debug)]
/// A set of errors that can occur during dispatching http requests
pub enum HttpDispatchError {
/// Application error
#[fail(display = "Application specific error: {}", _0)]
App(Error),
/// An `io::Error` that occurred while trying to read or write to a network
/// stream.
#[fail(display = "IO error: {}", _0)]
Io(io::Error),
/// The first request did not complete within the specified timeout.
#[fail(display = "The first request did not complete within the specified timeout")]
SlowRequestTimeout,
/// Shutdown timeout
#[fail(display = "Connection shutdown timeout")]
ShutdownTimeout,
/// HTTP2 error
#[fail(display = "HTTP2 error: {}", _0)]
Http2(http2::Error),
/// Payload is not consumed
#[fail(display = "Task is completed but request's payload is not consumed")]
PayloadIsNotConsumed,
/// Malformed request
#[fail(display = "Malformed request")]
MalformedRequest,
/// Internal error
#[fail(display = "Internal error")]
InternalError,
/// Unknown error
#[fail(display = "Unknown error")]
Unknown,
}
impl From<Error> for HttpDispatchError {
fn from(err: Error) -> Self {
HttpDispatchError::App(err)
}
}
impl From<io::Error> for HttpDispatchError {
fn from(err: io::Error) -> Self {
HttpDispatchError::Io(err)
}
}
impl From<http2::Error> for HttpDispatchError {
fn from(err: http2::Error) -> Self {
HttpDispatchError::Http2(err)
}
}
pub(crate) struct ServerError(Version, StatusCode); pub(crate) struct ServerError(Version, StatusCode);
impl ServerError { impl ServerError {
@@ -21,7 +96,12 @@ impl HttpHandlerTask for ServerError {
bytes.reserve(helpers::STATUS_LINE_BUF_SIZE + 1); bytes.reserve(helpers::STATUS_LINE_BUF_SIZE + 1);
helpers::write_status_line(self.0, self.1.as_u16(), bytes); helpers::write_status_line(self.0, self.1.as_u16(), bytes);
} }
// Convert Status Code to Reason.
let reason = self.1.canonical_reason().unwrap_or("");
io.buffer().extend_from_slice(reason.as_bytes());
// No response body.
io.buffer().extend_from_slice(b"\r\ncontent-length: 0\r\n"); io.buffer().extend_from_slice(b"\r\ncontent-length: 0\r\n");
// date header
io.set_date(); io.set_date();
Ok(Async::Ready(true)) Ok(Async::Ready(true))
} }

View File

@@ -1,113 +1,155 @@
use std::collections::VecDeque; use std::collections::VecDeque;
use std::net::SocketAddr; use std::net::{Shutdown, SocketAddr};
use std::rc::Rc;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use bytes::BytesMut; use bytes::BytesMut;
use futures::{Async, Future, Poll}; use futures::{Async, Future, Poll};
use tokio_current_thread::spawn;
use tokio_timer::Delay; use tokio_timer::Delay;
use error::{Error, PayloadError}; use error::{Error, PayloadError};
use http::{StatusCode, Version}; use http::{StatusCode, Version};
use payload::{Payload, PayloadStatus, PayloadWriter}; use payload::{Payload, PayloadStatus, PayloadWriter};
use super::error::ServerError; use super::error::{HttpDispatchError, ServerError};
use super::h1decoder::{DecoderError, H1Decoder, Message}; use super::h1decoder::{DecoderError, H1Decoder, Message};
use super::h1writer::H1Writer; use super::h1writer::H1Writer;
use super::handler::{HttpHandler, HttpHandlerTask, HttpHandlerTaskFut};
use super::input::PayloadType; use super::input::PayloadType;
use super::settings::WorkerSettings; use super::settings::ServiceConfig;
use super::Writer; use super::{IoStream, Writer};
use super::{HttpHandler, HttpHandlerTask, IoStream};
const MAX_PIPELINED_MESSAGES: usize = 16; const MAX_PIPELINED_MESSAGES: usize = 16;
bitflags! { bitflags! {
struct Flags: u8 { pub struct Flags: u8 {
const STARTED = 0b0000_0001; const STARTED = 0b0000_0001;
const ERROR = 0b0000_0010; const KEEPALIVE_ENABLED = 0b0000_0010;
const KEEPALIVE = 0b0000_0100; const KEEPALIVE = 0b0000_0100;
const SHUTDOWN = 0b0000_1000; const SHUTDOWN = 0b0000_1000;
const DISCONNECTED = 0b0001_0000; const READ_DISCONNECTED = 0b0001_0000;
const POLLED = 0b0010_0000; const WRITE_DISCONNECTED = 0b0010_0000;
const POLLED = 0b0100_0000;
const FLUSHED = 0b1000_0000;
} }
} }
bitflags! { /// Dispatcher for HTTP/1.1 protocol
struct EntryFlags: u8 { pub struct Http1Dispatcher<T: IoStream, H: HttpHandler + 'static> {
const EOF = 0b0000_0001;
const ERROR = 0b0000_0010;
const FINISHED = 0b0000_0100;
}
}
pub(crate) struct Http1<T: IoStream, H: HttpHandler + 'static> {
flags: Flags, flags: Flags,
settings: Rc<WorkerSettings<H>>, settings: ServiceConfig<H>,
addr: Option<SocketAddr>, addr: Option<SocketAddr>,
stream: H1Writer<T, H>, stream: H1Writer<T, H>,
decoder: H1Decoder, decoder: H1Decoder,
payload: Option<PayloadType>, payload: Option<PayloadType>,
buf: BytesMut, buf: BytesMut,
tasks: VecDeque<Entry<H>>, tasks: VecDeque<Entry<H>>,
keepalive_timer: Option<Delay>, error: Option<HttpDispatchError>,
ka_expire: Instant,
ka_timer: Option<Delay>,
} }
enum EntryPipe<H: HttpHandler> { enum Entry<H: HttpHandler> {
Task(H::Task), Task(H::Task),
Error(Box<HttpHandlerTask>), Error(Box<HttpHandlerTask>),
} }
impl<H: HttpHandler> EntryPipe<H> { impl<H: HttpHandler> Entry<H> {
fn into_task(self) -> H::Task {
match self {
Entry::Task(task) => task,
Entry::Error(_) => panic!(),
}
}
fn disconnected(&mut self) { fn disconnected(&mut self) {
match *self { match *self {
EntryPipe::Task(ref mut task) => task.disconnected(), Entry::Task(ref mut task) => task.disconnected(),
EntryPipe::Error(ref mut task) => task.disconnected(), Entry::Error(ref mut task) => task.disconnected(),
} }
} }
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error> { fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error> {
match *self { match *self {
EntryPipe::Task(ref mut task) => task.poll_io(io), Entry::Task(ref mut task) => task.poll_io(io),
EntryPipe::Error(ref mut task) => task.poll_io(io), Entry::Error(ref mut task) => task.poll_io(io),
} }
} }
fn poll_completed(&mut self) -> Poll<(), Error> { fn poll_completed(&mut self) -> Poll<(), Error> {
match *self { match *self {
EntryPipe::Task(ref mut task) => task.poll_completed(), Entry::Task(ref mut task) => task.poll_completed(),
EntryPipe::Error(ref mut task) => task.poll_completed(), Entry::Error(ref mut task) => task.poll_completed(),
} }
} }
} }
struct Entry<H: HttpHandler> { impl<T, H> Http1Dispatcher<T, H>
pipe: EntryPipe<H>,
flags: EntryFlags,
}
impl<T, H> Http1<T, H>
where where
T: IoStream, T: IoStream,
H: HttpHandler + 'static, H: HttpHandler + 'static,
{ {
pub fn new( pub fn new(
settings: Rc<WorkerSettings<H>>, stream: T, addr: Option<SocketAddr>, settings: ServiceConfig<H>, stream: T, buf: BytesMut, is_eof: bool,
buf: BytesMut, keepalive_timer: Option<Delay>,
) -> Self { ) -> Self {
Http1 { let addr = stream.peer_addr();
flags: Flags::KEEPALIVE, let (ka_expire, ka_timer) = if let Some(delay) = keepalive_timer {
stream: H1Writer::new(stream, Rc::clone(&settings)), (delay.deadline(), Some(delay))
} else if let Some(delay) = settings.keep_alive_timer() {
(delay.deadline(), Some(delay))
} else {
(settings.now(), None)
};
let flags = if is_eof {
Flags::READ_DISCONNECTED | Flags::FLUSHED
} else if settings.keep_alive_enabled() {
Flags::KEEPALIVE | Flags::KEEPALIVE_ENABLED | Flags::FLUSHED
} else {
Flags::empty()
};
Http1Dispatcher {
stream: H1Writer::new(stream, settings.clone()),
decoder: H1Decoder::new(), decoder: H1Decoder::new(),
payload: None, payload: None,
tasks: VecDeque::new(), tasks: VecDeque::new(),
keepalive_timer: None, error: None,
flags,
addr, addr,
buf, buf,
settings, settings,
ka_timer,
ka_expire,
} }
} }
pub(crate) fn for_error(
settings: ServiceConfig<H>, stream: T, status: StatusCode,
mut keepalive_timer: Option<Delay>, buf: BytesMut,
) -> Self {
if let Some(deadline) = settings.client_timer_expire() {
let _ = keepalive_timer.as_mut().map(|delay| delay.reset(deadline));
}
let mut disp = Http1Dispatcher {
flags: Flags::STARTED | Flags::READ_DISCONNECTED | Flags::FLUSHED,
stream: H1Writer::new(stream, settings.clone()),
decoder: H1Decoder::new(),
payload: None,
tasks: VecDeque::new(),
error: None,
addr: None,
ka_timer: keepalive_timer,
ka_expire: settings.now(),
buf,
settings,
};
disp.push_response_entry(status);
disp
}
#[inline] #[inline]
pub fn settings(&self) -> &WorkerSettings<H> { pub fn settings(&self) -> &ServiceConfig<H> {
self.settings.as_ref() &self.settings
} }
#[inline] #[inline]
@@ -117,6 +159,10 @@ where
#[inline] #[inline]
fn can_read(&self) -> bool { fn can_read(&self) -> bool {
if self.flags.contains(Flags::READ_DISCONNECTED) {
return false;
}
if let Some(ref info) = self.payload { if let Some(ref info) = self.payload {
info.need_read() == PayloadStatus::Read info.need_read() == PayloadStatus::Read
} else { } else {
@@ -124,242 +170,302 @@ where
} }
} }
fn notify_disconnect(&mut self) { // if checked is set to true, delay disconnect until all tasks have finished.
// notify all tasks fn client_disconnected(&mut self, checked: bool) {
self.stream.disconnected(); self.flags.insert(Flags::READ_DISCONNECTED);
for task in &mut self.tasks { if let Some(mut payload) = self.payload.take() {
task.pipe.disconnected(); payload.set_error(PayloadError::Incomplete);
}
if !checked || self.tasks.is_empty() {
self.flags
.insert(Flags::WRITE_DISCONNECTED | Flags::FLUSHED);
self.stream.disconnected();
// notify all tasks
for mut task in self.tasks.drain(..) {
task.disconnected();
match task.poll_completed() {
Ok(Async::NotReady) => {
// spawn not completed task, it does not require access to io
// at this point
spawn(HttpHandlerTaskFut::new(task.into_task()));
}
Ok(Async::Ready(_)) => (),
Err(err) => {
error!("Unhandled application error: {}", err);
}
}
}
} }
} }
#[inline] #[inline]
pub fn poll(&mut self) -> Poll<(), ()> { pub fn poll(&mut self) -> Poll<(), HttpDispatchError> {
// keep-alive timer // check connection keep-alive
if let Some(ref mut timer) = self.keepalive_timer { self.poll_keepalive()?;
match timer.poll() {
Ok(Async::Ready(_)) => {
trace!("Keep-alive timeout, close connection");
self.flags.insert(Flags::SHUTDOWN);
}
Ok(Async::NotReady) => (),
Err(_) => unreachable!(),
}
}
// shutdown // shutdown
if self.flags.contains(Flags::SHUTDOWN) { if self.flags.contains(Flags::SHUTDOWN) {
match self.stream.poll_completed(true) { if self.flags.contains(Flags::WRITE_DISCONNECTED) {
Ok(Async::NotReady) => return Ok(Async::NotReady), return Ok(Async::Ready(()));
Ok(Async::Ready(_)) => return Ok(Async::Ready(())),
Err(err) => {
debug!("Error sending data: {}", err);
return Err(());
}
} }
return self.poll_flush(true);
} }
self.poll_io(); // process incoming requests
if !self.flags.contains(Flags::WRITE_DISCONNECTED) {
self.poll_handler()?;
loop { // flush stream
match self.poll_handler()? { self.poll_flush(false)?;
Async::Ready(true) => {
self.poll_io(); // deal with keep-alive and stream eof (client-side write shutdown)
if self.tasks.is_empty() && self.flags.contains(Flags::FLUSHED) {
// handle stream eof
if self
.flags
.intersects(Flags::READ_DISCONNECTED | Flags::WRITE_DISCONNECTED)
{
return Ok(Async::Ready(()));
} }
Async::Ready(false) => { // no keep-alive
if self.flags.contains(Flags::STARTED)
&& (!self.flags.contains(Flags::KEEPALIVE_ENABLED)
|| !self.flags.contains(Flags::KEEPALIVE))
{
self.flags.insert(Flags::SHUTDOWN); self.flags.insert(Flags::SHUTDOWN);
return self.poll(); return self.poll();
} }
Async::NotReady => return Ok(Async::NotReady),
} }
Ok(Async::NotReady)
} else if let Some(err) = self.error.take() {
Err(err)
} else {
Ok(Async::Ready(()))
} }
} }
#[inline] /// Flush stream
/// read data from stream fn poll_flush(&mut self, shutdown: bool) -> Poll<(), HttpDispatchError> {
pub fn poll_io(&mut self) { if shutdown || self.flags.contains(Flags::STARTED) {
if !self.flags.contains(Flags::POLLED) { match self.stream.poll_completed(shutdown) {
self.parse(); Ok(Async::NotReady) => {
self.flags.insert(Flags::POLLED); // mark stream
return; if !self.stream.flushed() {
self.flags.remove(Flags::FLUSHED);
}
Ok(Async::NotReady)
}
Err(err) => {
debug!("Error sending data: {}", err);
self.client_disconnected(false);
Err(err.into())
}
Ok(Async::Ready(_)) => {
// if payload is not consumed we can not use connection
if self.payload.is_some() && self.tasks.is_empty() {
return Err(HttpDispatchError::PayloadIsNotConsumed);
}
self.flags.insert(Flags::FLUSHED);
Ok(Async::Ready(()))
}
}
} else {
Ok(Async::Ready(()))
} }
// read io from socket }
if !self.flags.intersects(Flags::ERROR)
&& self.tasks.len() < MAX_PIPELINED_MESSAGES
&& self.can_read()
{
match self.stream.get_mut().read_available(&mut self.buf) {
Ok(Async::Ready(disconnected)) => {
if disconnected {
// notify all tasks
self.notify_disconnect();
// kill keepalive
self.keepalive_timer.take();
// on parse error, stop reading stream but tasks need to be /// keep-alive timer. returns `true` is keep-alive, otherwise drop
// completed fn poll_keepalive(&mut self) -> Result<(), HttpDispatchError> {
self.flags.insert(Flags::ERROR); if let Some(ref mut timer) = self.ka_timer {
match timer.poll() {
Ok(Async::Ready(_)) => {
// if we get timer during shutdown, just drop connection
if self.flags.contains(Flags::SHUTDOWN) {
let io = self.stream.get_mut();
let _ = IoStream::set_linger(io, Some(Duration::from_secs(0)));
let _ = IoStream::shutdown(io, Shutdown::Both);
return Err(HttpDispatchError::ShutdownTimeout);
}
if timer.deadline() >= self.ka_expire {
// check for any outstanding request handling
if self.tasks.is_empty() {
if !self.flags.contains(Flags::STARTED) {
// timeout on first request (slow request) return 408
trace!("Slow request timeout");
self.flags
.insert(Flags::STARTED | Flags::READ_DISCONNECTED);
self.tasks.push_back(Entry::Error(ServerError::err(
Version::HTTP_11,
StatusCode::REQUEST_TIMEOUT,
)));
} else {
trace!("Keep-alive timeout, close connection");
self.flags.insert(Flags::SHUTDOWN);
if let Some(mut payload) = self.payload.take() { // start shutdown timer
payload.set_error(PayloadError::Incomplete); if let Some(deadline) =
self.settings.client_shutdown_timer()
{
timer.reset(deadline)
} else {
return Ok(());
}
}
} else if let Some(dl) = self.settings.keep_alive_expire() {
timer.reset(dl)
} }
} else { } else {
self.parse(); timer.reset(self.ka_expire)
} }
} }
Ok(Async::NotReady) => (), Ok(Async::NotReady) => (),
Err(_) => { Err(e) => {
// notify all tasks error!("Timer error {:?}", e);
self.notify_disconnect(); return Err(HttpDispatchError::Unknown);
// kill keepalive
self.keepalive_timer.take();
// on parse error, stop reading stream but tasks need to be
// completed
self.flags.insert(Flags::ERROR);
if let Some(mut payload) = self.payload.take() {
payload.set_error(PayloadError::Incomplete);
}
} }
} }
} }
Ok(())
} }
pub fn poll_handler(&mut self) -> Poll<bool, ()> { #[inline]
let retry = self.can_read(); /// read data from the stream
pub(self) fn poll_io(&mut self) -> Result<bool, HttpDispatchError> {
// check in-flight messages if !self.flags.contains(Flags::POLLED) {
let mut io = false; self.flags.insert(Flags::POLLED);
let mut idx = 0; if !self.buf.is_empty() {
while idx < self.tasks.len() { let updated = self.parse()?;
// only one task can do io operation in http/1 return Ok(updated);
if !io && !self.tasks[idx].flags.contains(EntryFlags::EOF) {
// io is corrupted, send buffer
if self.tasks[idx].flags.contains(EntryFlags::ERROR) {
if let Ok(Async::NotReady) = self.stream.poll_completed(true) {
return Ok(Async::NotReady);
}
self.flags.insert(Flags::ERROR);
return Err(());
}
match self.tasks[idx].pipe.poll_io(&mut self.stream) {
Ok(Async::Ready(ready)) => {
// override keep-alive state
if self.stream.keepalive() {
self.flags.insert(Flags::KEEPALIVE);
} else {
self.flags.remove(Flags::KEEPALIVE);
}
// prepare stream for next response
self.stream.reset();
if ready {
self.tasks[idx]
.flags
.insert(EntryFlags::EOF | EntryFlags::FINISHED);
} else {
self.tasks[idx].flags.insert(EntryFlags::EOF);
}
}
// no more IO for this iteration
Ok(Async::NotReady) => {
// check if previously read backpressure was enabled
if self.can_read() && !retry {
return Ok(Async::Ready(true));
}
io = true;
}
Err(err) => {
// it is not possible to recover from error
// during pipe handling, so just drop connection
self.notify_disconnect();
self.tasks[idx].flags.insert(EntryFlags::ERROR);
error!("Unhandled error1: {}", err);
continue;
}
}
} else if !self.tasks[idx].flags.contains(EntryFlags::FINISHED) {
match self.tasks[idx].pipe.poll_completed() {
Ok(Async::NotReady) => (),
Ok(Async::Ready(_)) => {
self.tasks[idx].flags.insert(EntryFlags::FINISHED)
}
Err(err) => {
self.notify_disconnect();
self.tasks[idx].flags.insert(EntryFlags::ERROR);
error!("Unhandled error: {}", err);
continue;
}
}
}
idx += 1;
}
// cleanup finished tasks
let max = self.tasks.len() >= MAX_PIPELINED_MESSAGES;
while !self.tasks.is_empty() {
if self.tasks[0]
.flags
.contains(EntryFlags::EOF | EntryFlags::FINISHED)
{
self.tasks.pop_front();
} else {
break;
} }
} }
// read more message
if max && self.tasks.len() >= MAX_PIPELINED_MESSAGES {
return Ok(Async::Ready(true));
}
// check stream state // read io from socket
if self.flags.contains(Flags::STARTED) { let mut updated = false;
match self.stream.poll_completed(false) { if self.can_read() && self.tasks.len() < MAX_PIPELINED_MESSAGES {
Ok(Async::NotReady) => return Ok(Async::NotReady), match self.stream.get_mut().read_available(&mut self.buf) {
Ok(Async::Ready((read_some, disconnected))) => {
if read_some && self.parse()? {
updated = true;
}
if disconnected {
self.client_disconnected(true);
}
}
Ok(Async::NotReady) => (),
Err(err) => { Err(err) => {
debug!("Error sending data: {}", err); self.client_disconnected(false);
self.notify_disconnect(); return Err(err.into());
return Err(());
}
Ok(Async::Ready(_)) => {
// non consumed payload in that case close connection
if self.payload.is_some() && self.tasks.is_empty() {
return Ok(Async::Ready(false));
}
} }
} }
} }
Ok(updated)
// deal with keep-alive
if self.tasks.is_empty() {
// no keep-alive
if self.flags.contains(Flags::ERROR)
|| (!self.flags.contains(Flags::KEEPALIVE)
|| !self.settings.keep_alive_enabled())
&& self.flags.contains(Flags::STARTED)
{
return Ok(Async::Ready(false));
}
// start keep-alive timer
let keep_alive = self.settings.keep_alive();
if self.keepalive_timer.is_none() && keep_alive > 0 {
trace!("Start keep-alive timer");
let mut timer =
Delay::new(Instant::now() + Duration::new(keep_alive, 0));
// register timer
let _ = timer.poll();
self.keepalive_timer = Some(timer);
}
}
Ok(Async::NotReady)
} }
pub fn parse(&mut self) { pub(self) fn poll_handler(&mut self) -> Result<(), HttpDispatchError> {
self.poll_io()?;
let mut retry = self.can_read();
// process first pipelined response, only first task can do io operation in http/1
while !self.tasks.is_empty() {
match self.tasks[0].poll_io(&mut self.stream) {
Ok(Async::Ready(ready)) => {
// override keep-alive state
if self.stream.keepalive() {
self.flags.insert(Flags::KEEPALIVE);
} else {
self.flags.remove(Flags::KEEPALIVE);
}
// prepare stream for next response
self.stream.reset();
let task = self.tasks.pop_front().unwrap();
if !ready {
// task is done with io operations but still needs to do more work
spawn(HttpHandlerTaskFut::new(task.into_task()));
}
}
Ok(Async::NotReady) => {
// check if we need timer
if self.ka_timer.is_some() && self.stream.upgrade() {
self.ka_timer.take();
}
// if read-backpressure is enabled and we consumed some data.
// we may read more dataand retry
if !retry && self.can_read() && self.poll_io()? {
retry = self.can_read();
continue;
}
break;
}
Err(err) => {
error!("Unhandled error1: {}", err);
// it is not possible to recover from error
// during pipe handling, so just drop connection
self.client_disconnected(false);
return Err(err.into());
}
}
}
// check in-flight messages. all tasks must be alive,
// they need to produce response. if app returned error
// and we can not continue processing incoming requests.
let mut idx = 1;
while idx < self.tasks.len() {
let stop = match self.tasks[idx].poll_completed() {
Ok(Async::NotReady) => false,
Ok(Async::Ready(_)) => true,
Err(err) => {
self.error = Some(err.into());
true
}
};
if stop {
// error in task handling or task is completed,
// so no response for this task which means we can not read more requests
// because pipeline sequence is broken.
// but we can safely complete existing tasks
self.flags.insert(Flags::READ_DISCONNECTED);
for mut task in self.tasks.drain(idx..) {
task.disconnected();
match task.poll_completed() {
Ok(Async::NotReady) => {
// spawn not completed task, it does not require access to io
// at this point
spawn(HttpHandlerTaskFut::new(task.into_task()));
}
Ok(Async::Ready(_)) => (),
Err(err) => {
error!("Unhandled application error: {}", err);
}
}
}
break;
} else {
idx += 1;
}
}
Ok(())
}
fn push_response_entry(&mut self, status: StatusCode) {
self.tasks
.push_back(Entry::Error(ServerError::err(Version::HTTP_11, status)));
}
pub(self) fn parse(&mut self) -> Result<bool, HttpDispatchError> {
let mut updated = false;
'outer: loop { 'outer: loop {
match self.decoder.decode(&mut self.buf, &self.settings) { match self.decoder.decode(&mut self.buf, &self.settings) {
Ok(Some(Message::Message { mut msg, payload })) => { Ok(Some(Message::Message { mut msg, payload })) => {
updated = true;
self.flags.insert(Flags::STARTED); self.flags.insert(Flags::STARTED);
if payload { if payload {
@@ -375,82 +481,76 @@ where
// set remote addr // set remote addr
msg.inner_mut().addr = self.addr; msg.inner_mut().addr = self.addr;
// stop keepalive timer
self.keepalive_timer.take();
// search handler for request // search handler for request
for h in self.settings.handlers().iter() { match self.settings.handler().handle(msg) {
msg = match h.handle(msg) { Ok(mut task) => {
Ok(mut pipe) => { if self.tasks.is_empty() {
if self.tasks.is_empty() { match task.poll_io(&mut self.stream) {
match pipe.poll_io(&mut self.stream) { Ok(Async::Ready(ready)) => {
Ok(Async::Ready(ready)) => { // override keep-alive state
// override keep-alive state if self.stream.keepalive() {
if self.stream.keepalive() { self.flags.insert(Flags::KEEPALIVE);
self.flags.insert(Flags::KEEPALIVE); } else {
} else { self.flags.remove(Flags::KEEPALIVE);
self.flags.remove(Flags::KEEPALIVE); }
} // prepare stream for next response
// prepare stream for next response self.stream.reset();
self.stream.reset();
if !ready { if !ready {
let item = Entry { // task is done with io operations
pipe: EntryPipe::Task(pipe), // but still needs to do more work
flags: EntryFlags::EOF, spawn(HttpHandlerTaskFut::new(task));
};
self.tasks.push_back(item);
}
continue 'outer;
}
Ok(Async::NotReady) => {}
Err(err) => {
error!("Unhandled error: {}", err);
self.flags.insert(Flags::ERROR);
return;
} }
continue 'outer;
}
Ok(Async::NotReady) => (),
Err(err) => {
error!("Unhandled error: {}", err);
self.client_disconnected(false);
return Err(err.into());
} }
} }
self.tasks.push_back(Entry {
pipe: EntryPipe::Task(pipe),
flags: EntryFlags::empty(),
});
continue 'outer;
} }
Err(msg) => msg, self.tasks.push_back(Entry::Task(task));
continue 'outer;
}
Err(_) => {
// handler is not found
self.push_response_entry(StatusCode::NOT_FOUND);
} }
} }
// handler is not found
self.tasks.push_back(Entry {
pipe: EntryPipe::Error(ServerError::err(
Version::HTTP_11,
StatusCode::NOT_FOUND,
)),
flags: EntryFlags::empty(),
});
} }
Ok(Some(Message::Chunk(chunk))) => { Ok(Some(Message::Chunk(chunk))) => {
updated = true;
if let Some(ref mut payload) = self.payload { if let Some(ref mut payload) = self.payload {
payload.feed_data(chunk); payload.feed_data(chunk);
} else { } else {
error!("Internal server error: unexpected payload chunk"); error!("Internal server error: unexpected payload chunk");
self.flags.insert(Flags::ERROR); self.flags.insert(Flags::READ_DISCONNECTED | Flags::STARTED);
self.push_response_entry(StatusCode::INTERNAL_SERVER_ERROR);
self.error = Some(HttpDispatchError::InternalError);
break; break;
} }
} }
Ok(Some(Message::Eof)) => { Ok(Some(Message::Eof)) => {
updated = true;
if let Some(mut payload) = self.payload.take() { if let Some(mut payload) = self.payload.take() {
payload.feed_eof(); payload.feed_eof();
} else { } else {
error!("Internal server error: unexpected eof"); error!("Internal server error: unexpected eof");
self.flags.insert(Flags::ERROR); self.flags.insert(Flags::READ_DISCONNECTED | Flags::STARTED);
self.push_response_entry(StatusCode::INTERNAL_SERVER_ERROR);
self.error = Some(HttpDispatchError::InternalError);
break; break;
} }
} }
Ok(None) => break, Ok(None) => {
if self.flags.contains(Flags::READ_DISCONNECTED) {
self.client_disconnected(true);
}
break;
}
Err(e) => { Err(e) => {
self.flags.insert(Flags::ERROR);
if let Some(mut payload) = self.payload.take() { if let Some(mut payload) = self.payload.take() {
let e = match e { let e = match e {
DecoderError::Io(e) => PayloadError::Io(e), DecoderError::Io(e) => PayloadError::Io(e),
@@ -458,10 +558,22 @@ where
}; };
payload.set_error(e); payload.set_error(e);
} }
// Malformed requests should be responded with 400
self.push_response_entry(StatusCode::BAD_REQUEST);
self.flags.insert(Flags::READ_DISCONNECTED | Flags::STARTED);
self.error = Some(HttpDispatchError::MalformedRequest);
break; break;
} }
} }
} }
if self.ka_timer.is_some() && updated {
if let Some(expire) = self.settings.keep_alive_expire() {
self.ka_expire = expire;
}
}
Ok(updated)
} }
} }
@@ -470,24 +582,28 @@ mod tests {
use std::net::Shutdown; use std::net::Shutdown;
use std::{cmp, io, time}; use std::{cmp, io, time};
use actix::System;
use bytes::{Buf, Bytes, BytesMut}; use bytes::{Buf, Bytes, BytesMut};
use futures::future;
use http::{Method, Version}; use http::{Method, Version};
use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::{AsyncRead, AsyncWrite};
use super::*; use super::*;
use application::HttpApplication; use application::{App, HttpApplication};
use httpmessage::HttpMessage; use httpmessage::HttpMessage;
use server::h1decoder::Message; use server::h1decoder::Message;
use server::settings::{ServerSettings, WorkerSettings}; use server::handler::IntoHttpHandler;
use server::{Connections, KeepAlive, Request}; use server::settings::{ServerSettings, ServiceConfig};
use server::{KeepAlive, Request};
fn wrk_settings() -> Rc<WorkerSettings<HttpApplication>> { fn wrk_settings() -> ServiceConfig<HttpApplication> {
Rc::new(WorkerSettings::<HttpApplication>::new( ServiceConfig::<HttpApplication>::new(
Vec::new(), App::new().into_handler(),
KeepAlive::Os, KeepAlive::Os,
5000,
2000,
ServerSettings::default(), ServerSettings::default(),
Connections::default(), )
))
} }
impl Message { impl Message {
@@ -584,6 +700,9 @@ mod tests {
fn set_linger(&mut self, _: Option<time::Duration>) -> io::Result<()> { fn set_linger(&mut self, _: Option<time::Duration>) -> io::Result<()> {
Ok(()) Ok(())
} }
fn set_keepalive(&mut self, _: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
} }
impl io::Write for Buffer { impl io::Write for Buffer {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> { fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
@@ -602,28 +721,22 @@ mod tests {
} }
} }
#[test]
fn test_req_parse() {
let buf = Buffer::new("GET /test HTTP/1.1\r\n\r\n");
let readbuf = BytesMut::new();
let settings = Rc::new(wrk_settings());
let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf);
h1.poll_io();
h1.poll_io();
assert_eq!(h1.tasks.len(), 1);
}
#[test] #[test]
fn test_req_parse_err() { fn test_req_parse_err() {
let buf = Buffer::new("GET /test HTTP/1\r\n\r\n"); let mut sys = System::new("test");
let readbuf = BytesMut::new(); let _ = sys.block_on(future::lazy(|| {
let settings = Rc::new(wrk_settings()); let buf = Buffer::new("GET /test HTTP/1\r\n\r\n");
let readbuf = BytesMut::new();
let settings = wrk_settings();
let mut h1 = Http1::new(Rc::clone(&settings), buf, None, readbuf); let mut h1 =
h1.poll_io(); Http1Dispatcher::new(settings.clone(), buf, readbuf, false, None);
h1.poll_io(); assert!(h1.poll_io().is_ok());
assert!(h1.flags.contains(Flags::ERROR)); assert!(h1.poll_io().is_ok());
assert!(h1.flags.contains(Flags::READ_DISCONNECTED));
assert_eq!(h1.tasks.len(), 1);
future::ok::<_, ()>(())
}));
} }
#[test] #[test]

View File

@@ -5,7 +5,7 @@ use futures::{Async, Poll};
use httparse; use httparse;
use super::message::{MessageFlags, Request}; use super::message::{MessageFlags, Request};
use super::settings::WorkerSettings; use super::settings::ServiceConfig;
use error::ParseError; use error::ParseError;
use http::header::{HeaderName, HeaderValue}; use http::header::{HeaderName, HeaderValue};
use http::{header, HttpTryFrom, Method, Uri, Version}; use http::{header, HttpTryFrom, Method, Uri, Version};
@@ -18,6 +18,7 @@ pub(crate) struct H1Decoder {
decoder: Option<EncodingDecoder>, decoder: Option<EncodingDecoder>,
} }
#[derive(Debug)]
pub(crate) enum Message { pub(crate) enum Message {
Message { msg: Request, payload: bool }, Message { msg: Request, payload: bool },
Chunk(Bytes), Chunk(Bytes),
@@ -42,7 +43,7 @@ impl H1Decoder {
} }
pub fn decode<H>( pub fn decode<H>(
&mut self, src: &mut BytesMut, settings: &WorkerSettings<H>, &mut self, src: &mut BytesMut, settings: &ServiceConfig<H>,
) -> Result<Option<Message>, DecoderError> { ) -> Result<Option<Message>, DecoderError> {
// read payload // read payload
if self.decoder.is_some() { if self.decoder.is_some() {
@@ -79,7 +80,7 @@ impl H1Decoder {
} }
fn parse_message<H>( fn parse_message<H>(
&self, buf: &mut BytesMut, settings: &WorkerSettings<H>, &self, buf: &mut BytesMut, settings: &ServiceConfig<H>,
) -> Poll<(Request, Option<EncodingDecoder>), ParseError> { ) -> Poll<(Request, Option<EncodingDecoder>), ParseError> {
// Parse http message // Parse http message
let mut has_upgrade = false; let mut has_upgrade = false;

View File

@@ -1,7 +1,6 @@
// #![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))] // #![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))]
use std::io::{self, Write}; use std::io::{self, Write};
use std::rc::Rc;
use bytes::{BufMut, BytesMut}; use bytes::{BufMut, BytesMut};
use futures::{Async, Poll}; use futures::{Async, Poll};
@@ -9,7 +8,7 @@ use tokio_io::AsyncWrite;
use super::helpers; use super::helpers;
use super::output::{Output, ResponseInfo, ResponseLength}; use super::output::{Output, ResponseInfo, ResponseLength};
use super::settings::WorkerSettings; use super::settings::ServiceConfig;
use super::Request; use super::Request;
use super::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE}; use super::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE};
use body::{Binary, Body}; use body::{Binary, Body};
@@ -38,11 +37,11 @@ pub(crate) struct H1Writer<T: AsyncWrite, H: 'static> {
headers_size: u32, headers_size: u32,
buffer: Output, buffer: Output,
buffer_capacity: usize, buffer_capacity: usize,
settings: Rc<WorkerSettings<H>>, settings: ServiceConfig<H>,
} }
impl<T: AsyncWrite, H: 'static> H1Writer<T, H> { impl<T: AsyncWrite, H: 'static> H1Writer<T, H> {
pub fn new(stream: T, settings: Rc<WorkerSettings<H>>) -> H1Writer<T, H> { pub fn new(stream: T, settings: ServiceConfig<H>) -> H1Writer<T, H> {
H1Writer { H1Writer {
flags: Flags::KEEPALIVE, flags: Flags::KEEPALIVE,
written: 0, written: 0,
@@ -63,7 +62,17 @@ impl<T: AsyncWrite, H: 'static> H1Writer<T, H> {
self.flags = Flags::KEEPALIVE; self.flags = Flags::KEEPALIVE;
} }
pub fn disconnected(&mut self) {} pub fn flushed(&mut self) -> bool {
self.buffer.is_empty()
}
pub fn disconnected(&mut self) {
self.flags.insert(Flags::DISCONNECTED);
}
pub fn upgrade(&self) -> bool {
self.flags.contains(Flags::UPGRADE)
}
pub fn keepalive(&self) -> bool { pub fn keepalive(&self) -> bool {
self.flags.contains(Flags::KEEPALIVE) && !self.flags.contains(Flags::UPGRADE) self.flags.contains(Flags::KEEPALIVE) && !self.flags.contains(Flags::UPGRADE)
@@ -167,13 +176,11 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
buffer.extend_from_slice(reason); buffer.extend_from_slice(reason);
// content length // content length
let mut len_is_set = true;
match info.length { match info.length {
ResponseLength::Chunked => { ResponseLength::Chunked => {
buffer.extend_from_slice(b"\r\ntransfer-encoding: chunked\r\n") buffer.extend_from_slice(b"\r\ntransfer-encoding: chunked\r\n")
} }
ResponseLength::Zero => {
buffer.extend_from_slice(b"\r\ncontent-length: 0\r\n")
}
ResponseLength::Length(len) => { ResponseLength::Length(len) => {
helpers::write_content_length(len, &mut buffer) helpers::write_content_length(len, &mut buffer)
} }
@@ -182,6 +189,10 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
write!(buffer.writer(), "{}", len)?; write!(buffer.writer(), "{}", len)?;
buffer.extend_from_slice(b"\r\n"); buffer.extend_from_slice(b"\r\n");
} }
ResponseLength::Zero => {
len_is_set = false;
buffer.extend_from_slice(b"\r\n");
}
ResponseLength::None => buffer.extend_from_slice(b"\r\n"), ResponseLength::None => buffer.extend_from_slice(b"\r\n"),
} }
if let Some(ce) = info.content_encoding { if let Some(ce) = info.content_encoding {
@@ -194,47 +205,57 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
let mut pos = 0; let mut pos = 0;
let mut has_date = false; let mut has_date = false;
let mut remaining = buffer.remaining_mut(); let mut remaining = buffer.remaining_mut();
unsafe { let mut buf = unsafe { &mut *(buffer.bytes_mut() as *mut [u8]) };
let mut buf = &mut *(buffer.bytes_mut() as *mut [u8]); for (key, value) in msg.headers() {
for (key, value) in msg.headers() { match *key {
match *key { TRANSFER_ENCODING => continue,
TRANSFER_ENCODING => continue, CONTENT_ENCODING => if encoding != ContentEncoding::Identity {
CONTENT_ENCODING => if encoding != ContentEncoding::Identity { continue;
continue; },
}, CONTENT_LENGTH => match info.length {
CONTENT_LENGTH => match info.length { ResponseLength::None => (),
ResponseLength::None => (), ResponseLength::Zero => {
_ => continue, len_is_set = true;
},
DATE => {
has_date = true;
} }
_ => (), _ => continue,
},
DATE => {
has_date = true;
} }
_ => (),
}
let v = value.as_ref(); let v = value.as_ref();
let k = key.as_str().as_bytes(); let k = key.as_str().as_bytes();
let len = k.len() + v.len() + 4; let len = k.len() + v.len() + 4;
if len > remaining { if len > remaining {
unsafe {
buffer.advance_mut(pos); buffer.advance_mut(pos);
pos = 0; }
buffer.reserve(len); pos = 0;
remaining = buffer.remaining_mut(); buffer.reserve(len);
remaining = buffer.remaining_mut();
unsafe {
buf = &mut *(buffer.bytes_mut() as *mut _); buf = &mut *(buffer.bytes_mut() as *mut _);
} }
buf[pos..pos + k.len()].copy_from_slice(k);
pos += k.len();
buf[pos..pos + 2].copy_from_slice(b": ");
pos += 2;
buf[pos..pos + v.len()].copy_from_slice(v);
pos += v.len();
buf[pos..pos + 2].copy_from_slice(b"\r\n");
pos += 2;
remaining -= len;
} }
buf[pos..pos + k.len()].copy_from_slice(k);
pos += k.len();
buf[pos..pos + 2].copy_from_slice(b": ");
pos += 2;
buf[pos..pos + v.len()].copy_from_slice(v);
pos += v.len();
buf[pos..pos + 2].copy_from_slice(b"\r\n");
pos += 2;
remaining -= len;
}
unsafe {
buffer.advance_mut(pos); buffer.advance_mut(pos);
} }
if !len_is_set {
buffer.extend_from_slice(b"content-length: 0\r\n")
}
// optimized date header, set_date writes \r\n // optimized date header, set_date writes \r\n
if !has_date { if !has_date {
@@ -268,10 +289,7 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
let pl: &[u8] = payload.as_ref(); let pl: &[u8] = payload.as_ref();
let n = match Self::write_data(&mut self.stream, pl) { let n = match Self::write_data(&mut self.stream, pl) {
Err(err) => { Err(err) => {
if err.kind() == io::ErrorKind::WriteZero { self.disconnected();
self.disconnected();
}
return Err(err); return Err(err);
} }
Ok(val) => val, Ok(val) => val,
@@ -315,14 +333,15 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
#[inline] #[inline]
fn poll_completed(&mut self, shutdown: bool) -> Poll<(), io::Error> { fn poll_completed(&mut self, shutdown: bool) -> Poll<(), io::Error> {
if self.flags.contains(Flags::DISCONNECTED) {
return Err(io::Error::new(io::ErrorKind::Other, "disconnected"));
}
if !self.buffer.is_empty() { if !self.buffer.is_empty() {
let written = { let written = {
match Self::write_data(&mut self.stream, self.buffer.as_ref().as_ref()) { match Self::write_data(&mut self.stream, self.buffer.as_ref().as_ref()) {
Err(err) => { Err(err) => {
if err.kind() == io::ErrorKind::WriteZero { self.disconnected();
self.disconnected();
}
return Err(err); return Err(err);
} }
Ok(val) => val, Ok(val) => val,
@@ -339,7 +358,7 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
self.stream.poll_flush()?; self.stream.poll_flush()?;
self.stream.shutdown() self.stream.shutdown()
} else { } else {
self.stream.poll_flush() Ok(self.stream.poll_flush()?)
} }
} }
} }

View File

@@ -2,7 +2,7 @@ use std::collections::VecDeque;
use std::io::{Read, Write}; use std::io::{Read, Write};
use std::net::SocketAddr; use std::net::SocketAddr;
use std::rc::Rc; use std::rc::Rc;
use std::time::{Duration, Instant}; use std::time::Instant;
use std::{cmp, io, mem}; use std::{cmp, io, mem};
use bytes::{Buf, Bytes}; use bytes::{Buf, Bytes};
@@ -19,15 +19,16 @@ use http::{StatusCode, Version};
use payload::{Payload, PayloadStatus, PayloadWriter}; use payload::{Payload, PayloadStatus, PayloadWriter};
use uri::Url; use uri::Url;
use super::error::ServerError; use super::error::{HttpDispatchError, ServerError};
use super::h2writer::H2Writer; use super::h2writer::H2Writer;
use super::input::PayloadType; use super::input::PayloadType;
use super::settings::WorkerSettings; use super::settings::ServiceConfig;
use super::{HttpHandler, HttpHandlerTask, IoStream, Writer}; use super::{HttpHandler, HttpHandlerTask, IoStream, Writer};
bitflags! { bitflags! {
struct Flags: u8 { struct Flags: u8 {
const DISCONNECTED = 0b0000_0010; const DISCONNECTED = 0b0000_0001;
const SHUTDOWN = 0b0000_0010;
} }
} }
@@ -38,12 +39,13 @@ where
H: HttpHandler + 'static, H: HttpHandler + 'static,
{ {
flags: Flags, flags: Flags,
settings: Rc<WorkerSettings<H>>, settings: ServiceConfig<H>,
addr: Option<SocketAddr>, addr: Option<SocketAddr>,
state: State<IoWrapper<T>>, state: State<IoWrapper<T>>,
tasks: VecDeque<Entry<H>>, tasks: VecDeque<Entry<H>>,
keepalive_timer: Option<Delay>,
extensions: Option<Rc<Extensions>>, extensions: Option<Rc<Extensions>>,
ka_expire: Instant,
ka_timer: Option<Delay>,
} }
enum State<T: AsyncRead + AsyncWrite> { enum State<T: AsyncRead + AsyncWrite> {
@@ -58,9 +60,20 @@ where
H: HttpHandler + 'static, H: HttpHandler + 'static,
{ {
pub fn new( pub fn new(
settings: Rc<WorkerSettings<H>>, io: T, addr: Option<SocketAddr>, buf: Bytes, settings: ServiceConfig<H>, io: T, buf: Bytes, keepalive_timer: Option<Delay>,
) -> Self { ) -> Self {
let addr = io.peer_addr();
let extensions = io.extensions(); let extensions = io.extensions();
// keep-alive timeout
let (ka_expire, ka_timer) = if let Some(delay) = keepalive_timer {
(delay.deadline(), Some(delay))
} else if let Some(delay) = settings.keep_alive_timer() {
(delay.deadline(), Some(delay))
} else {
(settings.now(), None)
};
Http2 { Http2 {
flags: Flags::empty(), flags: Flags::empty(),
tasks: VecDeque::new(), tasks: VecDeque::new(),
@@ -68,39 +81,34 @@ where
unread: if buf.is_empty() { None } else { Some(buf) }, unread: if buf.is_empty() { None } else { Some(buf) },
inner: io, inner: io,
})), })),
keepalive_timer: None,
addr, addr,
settings, settings,
extensions, extensions,
ka_expire,
ka_timer,
} }
} }
pub(crate) fn shutdown(&mut self) { pub(crate) fn shutdown(&mut self) {
self.state = State::Empty; self.state = State::Empty;
self.tasks.clear(); self.tasks.clear();
self.keepalive_timer.take();
} }
pub fn settings(&self) -> &WorkerSettings<H> { pub fn settings(&self) -> &ServiceConfig<H> {
self.settings.as_ref() &self.settings
} }
pub fn poll(&mut self) -> Poll<(), ()> { pub fn poll(&mut self) -> Poll<(), HttpDispatchError> {
self.poll_keepalive()?;
// server // server
if let State::Connection(ref mut conn) = self.state { if let State::Connection(ref mut conn) = self.state {
// keep-alive timer
if let Some(ref mut timeout) = self.keepalive_timer {
match timeout.poll() {
Ok(Async::Ready(_)) => {
trace!("Keep-alive timeout, close connection");
return Ok(Async::Ready(()));
}
Ok(Async::NotReady) => (),
Err(_) => unreachable!(),
}
}
loop { loop {
// shutdown connection
if self.flags.contains(Flags::SHUTDOWN) {
return conn.poll_close().map_err(|e| e.into());
}
let mut not_ready = true; let mut not_ready = true;
let disconnected = self.flags.contains(Flags::DISCONNECTED); let disconnected = self.flags.contains(Flags::DISCONNECTED);
@@ -215,51 +223,30 @@ where
not_ready = false; not_ready = false;
let (parts, body) = req.into_parts(); let (parts, body) = req.into_parts();
// stop keepalive timer // update keep-alive expire
self.keepalive_timer.take(); if self.ka_timer.is_some() {
if let Some(expire) = self.settings.keep_alive_expire() {
self.ka_expire = expire;
}
}
self.tasks.push_back(Entry::new( self.tasks.push_back(Entry::new(
parts, parts,
body, body,
resp, resp,
self.addr, self.addr,
&self.settings, self.settings.clone(),
self.extensions.clone(), self.extensions.clone(),
)); ));
} }
Ok(Async::NotReady) => { Ok(Async::NotReady) => return Ok(Async::NotReady),
// start keep-alive timer
if self.tasks.is_empty() {
if self.settings.keep_alive_enabled() {
let keep_alive = self.settings.keep_alive();
if keep_alive > 0 && self.keepalive_timer.is_none() {
trace!("Start keep-alive timer");
let mut timeout = Delay::new(
Instant::now()
+ Duration::new(keep_alive, 0),
);
// register timeout
let _ = timeout.poll();
self.keepalive_timer = Some(timeout);
}
} else {
// keep-alive disable, drop connection
return conn.poll_close().map_err(|e| {
error!("Error during connection close: {}", e)
});
}
} else {
// keep-alive unset, rely on operating system
return Ok(Async::NotReady);
}
}
Err(err) => { Err(err) => {
trace!("Connection error: {}", err); trace!("Connection error: {}", err);
self.flags.insert(Flags::DISCONNECTED); self.flags.insert(Flags::SHUTDOWN);
for entry in &mut self.tasks { for entry in &mut self.tasks {
entry.task.disconnected() entry.task.disconnected()
} }
self.keepalive_timer.take(); continue;
} }
} }
} }
@@ -267,9 +254,7 @@ where
if not_ready { if not_ready {
if self.tasks.is_empty() && self.flags.contains(Flags::DISCONNECTED) if self.tasks.is_empty() && self.flags.contains(Flags::DISCONNECTED)
{ {
return conn return conn.poll_close().map_err(|e| e.into());
.poll_close()
.map_err(|e| error!("Error during connection close: {}", e));
} else { } else {
return Ok(Async::NotReady); return Ok(Async::NotReady);
} }
@@ -284,7 +269,7 @@ where
Ok(Async::NotReady) => return Ok(Async::NotReady), Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(err) => { Err(err) => {
trace!("Error handling connection: {}", err); trace!("Error handling connection: {}", err);
return Err(()); return Err(err.into());
} }
} }
} else { } else {
@@ -293,6 +278,37 @@ where
self.poll() self.poll()
} }
/// keep-alive timer. returns `true` is keep-alive, otherwise drop
fn poll_keepalive(&mut self) -> Result<(), HttpDispatchError> {
if let Some(ref mut timer) = self.ka_timer {
match timer.poll() {
Ok(Async::Ready(_)) => {
// if we get timer during shutdown, just drop connection
if self.flags.contains(Flags::SHUTDOWN) {
return Err(HttpDispatchError::ShutdownTimeout);
}
if timer.deadline() >= self.ka_expire {
// check for any outstanding request handling
if self.tasks.is_empty() {
return Err(HttpDispatchError::ShutdownTimeout);
} else if let Some(dl) = self.settings.keep_alive_expire() {
timer.reset(dl)
}
} else {
timer.reset(self.ka_expire)
}
}
Ok(Async::NotReady) => (),
Err(e) => {
error!("Timer error {:?}", e);
return Err(HttpDispatchError::Unknown);
}
}
}
Ok(())
}
} }
bitflags! { bitflags! {
@@ -342,7 +358,7 @@ struct Entry<H: HttpHandler + 'static> {
impl<H: HttpHandler + 'static> Entry<H> { impl<H: HttpHandler + 'static> Entry<H> {
fn new( fn new(
parts: Parts, recv: RecvStream, resp: SendResponse<Bytes>, parts: Parts, recv: RecvStream, resp: SendResponse<Bytes>,
addr: Option<SocketAddr>, settings: &Rc<WorkerSettings<H>>, addr: Option<SocketAddr>, settings: ServiceConfig<H>,
extensions: Option<Rc<Extensions>>, extensions: Option<Rc<Extensions>>,
) -> Entry<H> ) -> Entry<H>
where where
@@ -367,28 +383,20 @@ impl<H: HttpHandler + 'static> Entry<H> {
let psender = PayloadType::new(msg.headers(), psender); let psender = PayloadType::new(msg.headers(), psender);
// start request processing // start request processing
let mut task = None; let task = match settings.handler().handle(msg) {
for h in settings.handlers().iter() { Ok(task) => EntryPipe::Task(task),
msg = match h.handle(msg) { Err(_) => EntryPipe::Error(ServerError::err(
Ok(t) => { Version::HTTP_2,
task = Some(t); StatusCode::NOT_FOUND,
break; )),
} };
Err(msg) => msg,
}
}
Entry { Entry {
task: task.map(EntryPipe::Task).unwrap_or_else(|| { task,
EntryPipe::Error(ServerError::err(
Version::HTTP_2,
StatusCode::NOT_FOUND,
))
}),
payload: psender,
stream: H2Writer::new(resp, Rc::clone(settings)),
flags: EntryFlags::empty(),
recv, recv,
payload: psender,
stream: H2Writer::new(resp, settings),
flags: EntryFlags::empty(),
} }
} }

View File

@@ -1,25 +1,27 @@
#![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))] #![cfg_attr(
feature = "cargo-clippy",
allow(clippy::redundant_field_names)
)]
use std::{cmp, io};
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use futures::{Async, Poll}; use futures::{Async, Poll};
use http2::server::SendResponse; use http2::server::SendResponse;
use http2::{Reason, SendStream}; use http2::{Reason, SendStream};
use modhttp::Response; use modhttp::Response;
use std::rc::Rc;
use std::{cmp, io};
use http::{HttpTryFrom, Method, Version};
use super::helpers; use super::helpers;
use super::message::Request; use super::message::Request;
use super::output::{Output, ResponseInfo, ResponseLength}; use super::output::{Output, ResponseInfo, ResponseLength};
use super::settings::WorkerSettings; use super::settings::ServiceConfig;
use super::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE}; use super::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE};
use body::{Binary, Body}; use body::{Binary, Body};
use header::ContentEncoding; use header::ContentEncoding;
use http::header::{ use http::header::{
HeaderValue, CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH, DATE, TRANSFER_ENCODING, HeaderValue, CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
}; };
use http::{HttpTryFrom, Method, Version};
use httpresponse::HttpResponse; use httpresponse::HttpResponse;
const CHUNK_SIZE: usize = 16_384; const CHUNK_SIZE: usize = 16_384;
@@ -40,13 +42,11 @@ pub(crate) struct H2Writer<H: 'static> {
written: u64, written: u64,
buffer: Output, buffer: Output,
buffer_capacity: usize, buffer_capacity: usize,
settings: Rc<WorkerSettings<H>>, settings: ServiceConfig<H>,
} }
impl<H: 'static> H2Writer<H> { impl<H: 'static> H2Writer<H> {
pub fn new( pub fn new(respond: SendResponse<Bytes>, settings: ServiceConfig<H>) -> H2Writer<H> {
respond: SendResponse<Bytes>, settings: Rc<WorkerSettings<H>>,
) -> H2Writer<H> {
H2Writer { H2Writer {
stream: None, stream: None,
flags: Flags::empty(), flags: Flags::empty(),

208
src/server/handler.rs Normal file
View File

@@ -0,0 +1,208 @@
use futures::{Async, Future, Poll};
use super::message::Request;
use super::Writer;
use error::Error;
/// Low level http request handler.
///
/// An `HttpHandler` inspects an incoming [`Request`] and either accepts it,
/// producing a `Task` that drives the response, or declines it by handing the
/// request back via `Err`. Returning the request unconsumed is what allows
/// several handlers (e.g. a tuple or `VecHttpHandler`) to be tried in order.
#[allow(unused_variables)]
pub trait HttpHandler: 'static {
    /// Request handling task created when this handler accepts a request.
    type Task: HttpHandlerTask;

    /// Handle request.
    ///
    /// Returns `Ok(task)` when this handler takes ownership of the request,
    /// or `Err(req)` — the untouched request — so a subsequent handler may
    /// attempt it.
    fn handle(&self, req: Request) -> Result<Self::Task, Request>;
}
/// A boxed handler trait object is itself a handler: calls are forwarded
/// straight to the wrapped implementation.
impl HttpHandler for Box<HttpHandler<Task = Box<HttpHandlerTask>>> {
    type Task = Box<HttpHandlerTask>;

    fn handle(&self, req: Request) -> Result<Self::Task, Request> {
        // Deref through the box and dispatch to the inner trait object.
        (**self).handle(req)
    }
}
/// Low level http request handler task.
///
/// A task is the unit of work produced by [`HttpHandler::handle`]; the server
/// polls it to completion. `poll_completed` and `disconnected` have no-op
/// defaults, so implementors only *must* provide `poll_io`.
pub trait HttpHandlerTask {
    /// Poll task, this method is used before or after *io* object is available.
    ///
    /// The default implementation reports the task as immediately complete.
    fn poll_completed(&mut self) -> Poll<(), Error> {
        Ok(Async::Ready(()))
    }

    /// Poll task when *io* object is available.
    ///
    /// Returns `Ready(true)` / `Ready(false)` per the server's write-progress
    /// protocol (see call sites in the dispatchers).
    fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error>;

    /// Connection is disconnected. Default is a no-op.
    fn disconnected(&mut self) {}
}
impl HttpHandlerTask for Box<HttpHandlerTask> {
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error> {
self.as_mut().poll_io(io)
}
}
/// Adapter wrapping an [`HttpHandlerTask`] so it can be driven as a plain
/// `Future` (polling only `poll_completed`; errors are discarded).
pub(super) struct HttpHandlerTaskFut<T: HttpHandlerTask> {
    // The wrapped task; polled via its `poll_completed` method.
    task: T,
}
impl<T: HttpHandlerTask> HttpHandlerTaskFut<T> {
pub(crate) fn new(task: T) -> Self {
Self { task }
}
}
/// Drives the wrapped task's `poll_completed`; any task error is mapped to
/// the unit error type (the failure detail is intentionally dropped here).
impl<T: HttpHandlerTask> Future for HttpHandlerTaskFut<T> {
    type Item = ();
    type Error = ();

    fn poll(&mut self) -> Poll<(), ()> {
        match self.task.poll_completed() {
            Ok(status) => Ok(status),
            Err(_) => Err(()),
        }
    }
}
/// Conversion helper trait.
///
/// Lets the server accept anything convertible into an [`HttpHandler`]
/// (a single handler, a `Vec` of them, or — via the macro below — tuples).
pub trait IntoHttpHandler {
    /// The associated type which is result of conversion.
    type Handler: HttpHandler;

    /// Convert into `HttpHandler` object.
    fn into_handler(self) -> Self::Handler;
}
/// Blanket impl: every handler trivially converts into itself.
impl<T: HttpHandler> IntoHttpHandler for T {
    type Handler = T;

    fn into_handler(self) -> Self::Handler {
        self
    }
}
/// A vector of convertibles becomes a [`VecHttpHandler`] that tries each
/// converted handler in order.
impl<T: IntoHttpHandler> IntoHttpHandler for Vec<T> {
    type Handler = VecHttpHandler<T::Handler>;

    fn into_handler(self) -> Self::Handler {
        // Convert element-wise, preserving order.
        let mut handlers = Vec::with_capacity(self.len());
        for item in self {
            handlers.push(item.into_handler());
        }
        VecHttpHandler(handlers)
    }
}
// Newtype over a list of handlers; requests are offered to each in turn
// (see the HttpHandler impl below). Hidden: internal plumbing for
// `IntoHttpHandler for Vec<T>`.
#[doc(hidden)]
pub struct VecHttpHandler<H: HttpHandler>(Vec<H>);
/// Try each contained handler in order; the first one that accepts the
/// request wins. If all decline, the request is returned unconsumed.
impl<H: HttpHandler> HttpHandler for VecHttpHandler<H> {
    type Task = H::Task;

    fn handle(&self, mut req: Request) -> Result<Self::Task, Request> {
        for handler in self.0.iter() {
            match handler.handle(req) {
                // First handler to accept the request produces the task.
                Ok(task) => return Ok(task),
                // Declined: the handler gives the request back; pass it on.
                Err(returned) => req = returned,
            }
        }
        Err(req)
    }
}
// Generates, for a tuple of handlers (A, B, ...):
//   * an `HttpHandler` impl that offers the request to each tuple element in
//     order (same first-match-wins protocol as `VecHttpHandler`), and
//   * a hidden enum `$EN` with one variant per element, wrapping that
//     element's `Task`, plus an `HttpHandlerTask` impl that dispatches
//     `poll_completed` / `poll_io` / `disconnected` to the active variant.
// `$n` is the tuple index (tt so `self.$n` parses); `$T` doubles as the type
// parameter name and the enum variant name.
macro_rules! http_handler ({$EN:ident, $(($n:tt, $T:ident)),+} => {
    impl<$($T: HttpHandler,)+> HttpHandler for ($($T,)+) {
        type Task = $EN<$($T,)+>;

        fn handle(&self, mut req: Request) -> Result<Self::Task, Request> {
            // Offer the request to each tuple element; a decliner returns
            // the request via Err so the next element can try.
            $(
                req = match self.$n.handle(req) {
                    Ok(task) => return Ok($EN::$T(task)),
                    Err(e) => e,
                };
            )+
            Err(req)
        }
    }

    #[doc(hidden)]
    pub enum $EN<$($T: HttpHandler,)+> {
        $($T ($T::Task),)+
    }

    impl<$($T: HttpHandler,)+> HttpHandlerTask for $EN<$($T,)+>
    {
        fn poll_completed(&mut self) -> Poll<(), Error> {
            match self {
                $($EN :: $T(ref mut task) => task.poll_completed(),)+
            }
        }

        fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error> {
            match self {
                $($EN::$T(ref mut task) => task.poll_io(io),)+
            }
        }

        /// Connection is disconnected
        fn disconnected(&mut self) {
            match self {
                $($EN::$T(ref mut task) => task.disconnected(),)+
            }
        }
    }
});
// Instantiate tuple-handler support for arities 1 through 10.
http_handler!(HttpHandlerTask1, (0, A));
http_handler!(HttpHandlerTask2, (0, A), (1, B));
http_handler!(HttpHandlerTask3, (0, A), (1, B), (2, C));
http_handler!(HttpHandlerTask4, (0, A), (1, B), (2, C), (3, D));
http_handler!(HttpHandlerTask5, (0, A), (1, B), (2, C), (3, D), (4, E));
http_handler!(
    HttpHandlerTask6,
    (0, A),
    (1, B),
    (2, C),
    (3, D),
    (4, E),
    (5, F)
);
http_handler!(
    HttpHandlerTask7,
    (0, A),
    (1, B),
    (2, C),
    (3, D),
    (4, E),
    (5, F),
    (6, G)
);
http_handler!(
    HttpHandlerTask8,
    (0, A),
    (1, B),
    (2, C),
    (3, D),
    (4, E),
    (5, F),
    (6, G),
    (7, H)
);
http_handler!(
    HttpHandlerTask9,
    (0, A),
    (1, B),
    (2, C),
    (3, D),
    (4, E),
    (5, F),
    (6, G),
    (7, H),
    (8, I)
);
http_handler!(
    HttpHandlerTask10,
    (0, A),
    (1, B),
    (2, C),
    (3, D),
    (4, E),
    (5, F),
    (6, G),
    (7, H),
    (8, I),
    (9, J)
);

View File

@@ -29,20 +29,24 @@ pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesM
let lut_ptr = DEC_DIGITS_LUT.as_ptr(); let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let four = n > 999; let four = n > 999;
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe { unsafe {
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2); ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
}
// decode last 1 or 2 chars // decode last 1 or 2 chars
if n < 10 { if n < 10 {
curr -= 1; curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0'; *buf_ptr.offset(curr) = (n as u8) + b'0';
} else { }
let d1 = n << 1; } else {
curr -= 2; let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping( ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize), lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr), buf_ptr.offset(curr),
@@ -74,7 +78,7 @@ pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
let d1 = n << 1; let d1 = n << 1;
unsafe { unsafe {
ptr::copy_nonoverlapping( ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().offset(d1 as isize), DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(18), buf.as_mut_ptr().offset(18),
2, 2,
); );
@@ -90,7 +94,7 @@ pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
n /= 100; n /= 100;
unsafe { unsafe {
ptr::copy_nonoverlapping( ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().offset(d1 as isize), DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(19), buf.as_mut_ptr().offset(19),
2, 2,
) )
@@ -107,47 +111,55 @@ pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
} }
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) { pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
unsafe { let mut curr: isize = 39;
let mut curr: isize = 39; let mut buf: [u8; 41] = unsafe { mem::uninitialized() };
let mut buf: [u8; 41] = mem::uninitialized(); buf[39] = b'\r';
buf[39] = b'\r'; buf[40] = b'\n';
buf[40] = b'\n'; let buf_ptr = buf.as_mut_ptr();
let buf_ptr = buf.as_mut_ptr(); let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
// eagerly decode 4 characters at a time // eagerly decode 4 characters at a time
while n >= 10_000 { while n >= 10_000 {
let rem = (n % 10_000) as isize; let rem = (n % 10_000) as isize;
n /= 10_000; n /= 10_000;
let d1 = (rem / 100) << 1; let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1; let d2 = (rem % 100) << 1;
curr -= 4; curr -= 4;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2); ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2);
} }
}
// if we reach here numbers are <= 9999, so at most 4 chars long // if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math let mut n = n as isize; // possibly reduce 64bit math
// decode 2 more chars, if > 2 chars // decode 2 more chars, if > 2 chars
if n >= 100 { if n >= 100 {
let d1 = (n % 100) << 1; let d1 = (n % 100) << 1;
n /= 100; n /= 100;
curr -= 2; curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
} }
}
// decode last 1 or 2 chars // decode last 1 or 2 chars
if n < 10 { if n < 10 {
curr -= 1; curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0'; *buf_ptr.offset(curr) = (n as u8) + b'0';
} else { }
let d1 = n << 1; } else {
curr -= 2; let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2); ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
} }
}
unsafe {
bytes.extend_from_slice(slice::from_raw_parts( bytes.extend_from_slice(slice::from_raw_parts(
buf_ptr.offset(curr), buf_ptr.offset(curr),
41 - curr as usize, 41 - curr as usize,

View File

@@ -1,47 +1,48 @@
use std::marker::PhantomData; use std::{fmt, io, mem, net};
use std::rc::Rc;
use std::sync::Arc;
use std::{io, mem, net, time};
use actix::{Actor, Addr, Arbiter, AsyncContext, Context, Handler, System}; use actix::{Addr, System};
use actix_net::server::Server;
use actix_net::service::NewService;
use actix_net::ssl;
use futures::{Future, Stream}; use net2::TcpBuilder;
use net2::{TcpBuilder, TcpStreamExt};
use num_cpus; use num_cpus;
use tokio::executor::current_thread;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_tcp::TcpStream;
#[cfg(feature = "tls")] #[cfg(feature = "tls")]
use native_tls::TlsAcceptor; use native_tls::TlsAcceptor;
#[cfg(feature = "alpn")] #[cfg(any(feature = "alpn", feature = "ssl"))]
use openssl::ssl::SslAcceptorBuilder; use openssl::ssl::SslAcceptorBuilder;
#[cfg(feature = "rust-tls")] #[cfg(feature = "rust-tls")]
use rustls::ServerConfig; use rustls::ServerConfig;
use super::channel::{HttpChannel, WrapperStream}; use super::acceptor::{AcceptorServiceFactory, DefaultAcceptor};
use super::server::{Connections, Server, Service, ServiceHandler}; use super::builder::{HttpServiceBuilder, ServiceProvider};
use super::settings::{ServerSettings, WorkerSettings}; use super::{IntoHttpHandler, KeepAlive};
use super::worker::{Conn, Socket};
use super::{ struct Socket {
AcceptorService, HttpHandler, IntoAsyncIo, IntoHttpHandler, IoStream, KeepAlive, scheme: &'static str,
Token, lst: net::TcpListener,
}; addr: net::SocketAddr,
handler: Box<ServiceProvider>,
}
/// An HTTP Server /// An HTTP Server
/// ///
/// By default it serves HTTP2 when HTTPs is enabled, /// By default it serves HTTP2 when HTTPs is enabled,
/// in order to change it, use `ServerFlags` that can be provided /// in order to change it, use `ServerFlags` that can be provided
/// to acceptor service. /// to acceptor service.
pub struct HttpServer<H> pub struct HttpServer<H, F>
where where
H: IntoHttpHandler + 'static, H: IntoHttpHandler + 'static,
F: Fn() -> H + Send + Clone,
{ {
factory: Arc<Fn() -> Vec<H> + Send + Sync>, pub(super) factory: F,
host: Option<String>, pub(super) host: Option<String>,
keep_alive: KeepAlive, pub(super) keep_alive: KeepAlive,
pub(super) client_timeout: u64,
pub(super) client_shutdown: u64,
backlog: i32, backlog: i32,
threads: usize, threads: usize,
exit: bool, exit: bool,
@@ -51,36 +52,30 @@ where
maxconn: usize, maxconn: usize,
maxconnrate: usize, maxconnrate: usize,
sockets: Vec<Socket>, sockets: Vec<Socket>,
handlers: Vec<Box<IoStreamHandler<H::Handler, net::TcpStream>>>,
} }
impl<H> HttpServer<H> impl<H, F> HttpServer<H, F>
where where
H: IntoHttpHandler + 'static, H: IntoHttpHandler + 'static,
F: Fn() -> H + Send + Clone + 'static,
{ {
/// Create new http server with application factory /// Create new http server with application factory
pub fn new<F, U>(factory: F) -> Self pub fn new(factory: F) -> HttpServer<H, F> {
where
F: Fn() -> U + Sync + Send + 'static,
U: IntoIterator<Item = H> + 'static,
{
let f = move || (factory)().into_iter().collect();
HttpServer { HttpServer {
factory,
threads: num_cpus::get(), threads: num_cpus::get(),
factory: Arc::new(f),
host: None, host: None,
backlog: 2048, backlog: 2048,
keep_alive: KeepAlive::Os, keep_alive: KeepAlive::Timeout(5),
shutdown_timeout: 30, shutdown_timeout: 30,
exit: true, exit: false,
no_http2: false, no_http2: false,
no_signals: false, no_signals: false,
maxconn: 102_400, maxconn: 25_600,
maxconnrate: 256, maxconnrate: 256,
// settings: None, client_timeout: 5000,
client_shutdown: 5000,
sockets: Vec::new(), sockets: Vec::new(),
handlers: Vec::new(),
} }
} }
@@ -113,7 +108,7 @@ where
/// All socket listeners will stop accepting connections when this limit is reached /// All socket listeners will stop accepting connections when this limit is reached
/// for each worker. /// for each worker.
/// ///
/// By default max connections is set to a 100k. /// By default max connections is set to a 25k.
pub fn maxconn(mut self, num: usize) -> Self { pub fn maxconn(mut self, num: usize) -> Self {
self.maxconn = num; self.maxconn = num;
self self
@@ -132,12 +127,39 @@ where
/// Set server keep-alive setting. /// Set server keep-alive setting.
/// ///
/// By default keep alive is set to a `Os`. /// By default keep alive is set to a 5 seconds.
pub fn keep_alive<T: Into<KeepAlive>>(mut self, val: T) -> Self { pub fn keep_alive<T: Into<KeepAlive>>(mut self, val: T) -> Self {
self.keep_alive = val.into(); self.keep_alive = val.into();
self self
} }
/// Set server client timeout in milliseconds for first request.
///
/// Defines a timeout for reading client request header. If a client does not transmit
/// the entire set headers within this time, the request is terminated with
/// the 408 (Request Time-out) error.
///
/// To disable timeout set value to 0.
///
/// By default client timeout is set to 5000 milliseconds.
pub fn client_timeout(mut self, val: u64) -> Self {
self.client_timeout = val;
self
}
/// Set server connection shutdown timeout in milliseconds.
///
/// Defines a timeout for shutdown connection. If a shutdown procedure does not complete
/// within this time, the request is dropped.
///
/// To disable timeout set value to 0.
///
/// By default client timeout is set to 5000 milliseconds.
pub fn client_shutdown(mut self, val: u64) -> Self {
self.client_shutdown = val;
self
}
/// Set server host name. /// Set server host name.
/// ///
/// Host name is used by application router aa a hostname for url /// Host name is used by application router aa a hostname for url
@@ -175,11 +197,6 @@ where
} }
/// Disable `HTTP/2` support /// Disable `HTTP/2` support
// #[doc(hidden)]
// #[deprecated(
// since = "0.7.4",
// note = "please use acceptor service with proper ServerFlags parama"
// )]
pub fn no_http2(mut self) -> Self { pub fn no_http2(mut self) -> Self {
self.no_http2 = true; self.no_http2 = true;
self self
@@ -197,10 +214,7 @@ where
/// and the user should be presented with an enumeration of which /// and the user should be presented with an enumeration of which
/// socket requires which protocol. /// socket requires which protocol.
pub fn addrs_with_scheme(&self) -> Vec<(net::SocketAddr, &str)> { pub fn addrs_with_scheme(&self) -> Vec<(net::SocketAddr, &str)> {
self.handlers self.sockets.iter().map(|s| (s.addr, s.scheme)).collect()
.iter()
.map(|s| (s.addr(), s.scheme()))
.collect()
} }
/// Use listener for accepting incoming connection requests /// Use listener for accepting incoming connection requests
@@ -208,11 +222,16 @@ where
/// HttpServer does not change any configuration for TcpListener, /// HttpServer does not change any configuration for TcpListener,
/// it needs to be configured before passing it to listen() method. /// it needs to be configured before passing it to listen() method.
pub fn listen(mut self, lst: net::TcpListener) -> Self { pub fn listen(mut self, lst: net::TcpListener) -> Self {
let token = Token(self.handlers.len());
let addr = lst.local_addr().unwrap(); let addr = lst.local_addr().unwrap();
self.handlers self.sockets.push(Socket {
.push(Box::new(SimpleHandler::new(lst.local_addr().unwrap()))); lst,
self.sockets.push(Socket { lst, addr, token }); addr,
scheme: "http",
handler: Box::new(HttpServiceBuilder::new(
self.factory.clone(),
DefaultAcceptor,
)),
});
self self
} }
@@ -221,15 +240,16 @@ where
/// Use listener for accepting incoming connection requests /// Use listener for accepting incoming connection requests
pub fn listen_with<A>(mut self, lst: net::TcpListener, acceptor: A) -> Self pub fn listen_with<A>(mut self, lst: net::TcpListener, acceptor: A) -> Self
where where
A: AcceptorService<TcpStream> + Send + 'static, A: AcceptorServiceFactory,
<A::NewService as NewService>::InitError: fmt::Debug,
{ {
let token = Token(self.handlers.len());
let addr = lst.local_addr().unwrap(); let addr = lst.local_addr().unwrap();
self.handlers.push(Box::new(StreamHandler::new( self.sockets.push(Socket {
lst.local_addr().unwrap(), lst,
acceptor, addr,
))); scheme: "https",
self.sockets.push(Socket { lst, addr, token }); handler: Box::new(HttpServiceBuilder::new(self.factory.clone(), acceptor)),
});
self self
} }
@@ -240,36 +260,42 @@ where
/// HttpServer does not change any configuration for TcpListener, /// HttpServer does not change any configuration for TcpListener,
/// it needs to be configured before passing it to listen() method. /// it needs to be configured before passing it to listen() method.
pub fn listen_tls(self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self { pub fn listen_tls(self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self {
use super::NativeTlsAcceptor; use actix_net::service::NewServiceExt;
self.listen_with(lst, NativeTlsAcceptor::new(acceptor)) self.listen_with(lst, move || {
ssl::NativeTlsAcceptor::new(acceptor.clone()).map_err(|_| ())
})
} }
#[cfg(feature = "alpn")] #[cfg(any(feature = "alpn", feature = "ssl"))]
/// Use listener for accepting incoming tls connection requests /// Use listener for accepting incoming tls connection requests
/// ///
/// This method sets alpn protocols to "h2" and "http/1.1" /// This method sets alpn protocols to "h2" and "http/1.1"
pub fn listen_ssl( pub fn listen_ssl(
self, lst: net::TcpListener, builder: SslAcceptorBuilder, self, lst: net::TcpListener, builder: SslAcceptorBuilder,
) -> io::Result<Self> { ) -> io::Result<Self> {
use super::{OpensslAcceptor, ServerFlags}; use super::{openssl_acceptor_with_flags, ServerFlags};
use actix_net::service::NewServiceExt;
// alpn support
let flags = if self.no_http2 { let flags = if self.no_http2 {
ServerFlags::HTTP1 ServerFlags::HTTP1
} else { } else {
ServerFlags::HTTP1 | ServerFlags::HTTP2 ServerFlags::HTTP1 | ServerFlags::HTTP2
}; };
Ok(self.listen_with(lst, OpensslAcceptor::with_flags(builder, flags)?)) let acceptor = openssl_acceptor_with_flags(builder, flags)?;
Ok(self.listen_with(lst, move || {
ssl::OpensslAcceptor::new(acceptor.clone()).map_err(|_| ())
}))
} }
#[cfg(feature = "rust-tls")] #[cfg(feature = "rust-tls")]
/// Use listener for accepting incoming tls connection requests /// Use listener for accepting incoming tls connection requests
/// ///
/// This method sets alpn protocols to "h2" and "http/1.1" /// This method sets alpn protocols to "h2" and "http/1.1"
pub fn listen_rustls(self, lst: net::TcpListener, builder: ServerConfig) -> Self { pub fn listen_rustls(self, lst: net::TcpListener, config: ServerConfig) -> Self {
use super::{RustlsAcceptor, ServerFlags}; use super::{RustlsAcceptor, ServerFlags};
use actix_net::service::NewServiceExt;
// alpn support // alpn support
let flags = if self.no_http2 { let flags = if self.no_http2 {
@@ -278,7 +304,9 @@ where
ServerFlags::HTTP1 | ServerFlags::HTTP2 ServerFlags::HTTP1 | ServerFlags::HTTP2
}; };
self.listen_with(lst, RustlsAcceptor::with_flags(builder, flags)) self.listen_with(lst, move || {
RustlsAcceptor::with_flags(config.clone(), flags).map_err(|_| ())
})
} }
/// The socket address to bind /// The socket address to bind
@@ -288,11 +316,7 @@ where
let sockets = self.bind2(addr)?; let sockets = self.bind2(addr)?;
for lst in sockets { for lst in sockets {
let token = Token(self.handlers.len()); self = self.listen(lst);
let addr = lst.local_addr().unwrap();
self.handlers
.push(Box::new(SimpleHandler::new(lst.local_addr().unwrap())));
self.sockets.push(Socket { lst, addr, token })
} }
Ok(self) Ok(self)
@@ -300,22 +324,29 @@ where
/// Start listening for incoming connections with supplied acceptor. /// Start listening for incoming connections with supplied acceptor.
#[doc(hidden)] #[doc(hidden)]
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))] #[cfg_attr(
feature = "cargo-clippy",
allow(clippy::needless_pass_by_value)
)]
pub fn bind_with<S, A>(mut self, addr: S, acceptor: A) -> io::Result<Self> pub fn bind_with<S, A>(mut self, addr: S, acceptor: A) -> io::Result<Self>
where where
S: net::ToSocketAddrs, S: net::ToSocketAddrs,
A: AcceptorService<TcpStream> + Send + 'static, A: AcceptorServiceFactory,
<A::NewService as NewService>::InitError: fmt::Debug,
{ {
let sockets = self.bind2(addr)?; let sockets = self.bind2(addr)?;
for lst in sockets { for lst in sockets {
let token = Token(self.handlers.len());
let addr = lst.local_addr().unwrap(); let addr = lst.local_addr().unwrap();
self.handlers.push(Box::new(StreamHandler::new( self.sockets.push(Socket {
lst.local_addr().unwrap(), lst,
acceptor.clone(), addr,
))); scheme: "https",
self.sockets.push(Socket { lst, addr, token }) handler: Box::new(HttpServiceBuilder::new(
self.factory.clone(),
acceptor.clone(),
)),
});
} }
Ok(self) Ok(self)
@@ -358,12 +389,15 @@ where
pub fn bind_tls<S: net::ToSocketAddrs>( pub fn bind_tls<S: net::ToSocketAddrs>(
self, addr: S, acceptor: TlsAcceptor, self, addr: S, acceptor: TlsAcceptor,
) -> io::Result<Self> { ) -> io::Result<Self> {
use super::NativeTlsAcceptor; use actix_net::service::NewServiceExt;
use actix_net::ssl::NativeTlsAcceptor;
self.bind_with(addr, NativeTlsAcceptor::new(acceptor)) self.bind_with(addr, move || {
NativeTlsAcceptor::new(acceptor.clone()).map_err(|_| ())
})
} }
#[cfg(feature = "alpn")] #[cfg(any(feature = "alpn", feature = "ssl"))]
/// Start listening for incoming tls connections. /// Start listening for incoming tls connections.
/// ///
/// This method sets alpn protocols to "h2" and "http/1.1" /// This method sets alpn protocols to "h2" and "http/1.1"
@@ -371,16 +405,20 @@ where
where where
S: net::ToSocketAddrs, S: net::ToSocketAddrs,
{ {
use super::{OpensslAcceptor, ServerFlags}; use super::{openssl_acceptor_with_flags, ServerFlags};
use actix_net::service::NewServiceExt;
// alpn support // alpn support
let flags = if !self.no_http2 { let flags = if self.no_http2 {
ServerFlags::HTTP1 ServerFlags::HTTP1
} else { } else {
ServerFlags::HTTP1 | ServerFlags::HTTP2 ServerFlags::HTTP1 | ServerFlags::HTTP2
}; };
self.bind_with(addr, OpensslAcceptor::with_flags(builder, flags)?) let acceptor = openssl_acceptor_with_flags(builder, flags)?;
self.bind_with(addr, move || {
ssl::OpensslAcceptor::new(acceptor.clone()).map_err(|_| ())
})
} }
#[cfg(feature = "rust-tls")] #[cfg(feature = "rust-tls")]
@@ -391,76 +429,22 @@ where
self, addr: S, builder: ServerConfig, self, addr: S, builder: ServerConfig,
) -> io::Result<Self> { ) -> io::Result<Self> {
use super::{RustlsAcceptor, ServerFlags}; use super::{RustlsAcceptor, ServerFlags};
use actix_net::service::NewServiceExt;
// alpn support // alpn support
let flags = if !self.no_http2 { let flags = if self.no_http2 {
ServerFlags::HTTP1 ServerFlags::HTTP1
} else { } else {
ServerFlags::HTTP1 | ServerFlags::HTTP2 ServerFlags::HTTP1 | ServerFlags::HTTP2
}; };
self.bind_with(addr, RustlsAcceptor::with_flags(builder, flags)) self.bind_with(addr, move || {
} RustlsAcceptor::with_flags(builder.clone(), flags).map_err(|_| ())
}
impl<H: IntoHttpHandler> Into<(Box<Service>, Vec<(Token, net::TcpListener)>)>
for HttpServer<H>
{
fn into(mut self) -> (Box<Service>, Vec<(Token, net::TcpListener)>) {
let sockets: Vec<_> = mem::replace(&mut self.sockets, Vec::new())
.into_iter()
.map(|item| (item.token, item.lst))
.collect();
(
Box::new(HttpService {
factory: self.factory,
host: self.host,
keep_alive: self.keep_alive,
handlers: self.handlers,
}),
sockets,
)
}
}
struct HttpService<H: IntoHttpHandler> {
factory: Arc<Fn() -> Vec<H> + Send + Sync>,
host: Option<String>,
keep_alive: KeepAlive,
handlers: Vec<Box<IoStreamHandler<H::Handler, net::TcpStream>>>,
}
impl<H: IntoHttpHandler + 'static> Service for HttpService<H> {
fn clone(&self) -> Box<Service> {
Box::new(HttpService {
factory: self.factory.clone(),
host: self.host.clone(),
keep_alive: self.keep_alive,
handlers: self.handlers.iter().map(|v| v.clone()).collect(),
}) })
} }
fn create(&self, conns: Connections) -> Box<ServiceHandler> {
let addr = self.handlers[0].addr();
let s = ServerSettings::new(Some(addr), &self.host, false);
let apps: Vec<_> = (*self.factory)()
.into_iter()
.map(|h| h.into_handler())
.collect();
let handlers = self.handlers.iter().map(|h| h.clone()).collect();
Box::new(HttpServiceHandler::new(
apps,
handlers,
self.keep_alive,
s,
conns,
))
}
} }
impl<H: IntoHttpHandler> HttpServer<H> { impl<H: IntoHttpHandler, F: Fn() -> H + Send + Clone> HttpServer<H, F> {
/// Start listening for incoming connections. /// Start listening for incoming connections.
/// ///
/// This method starts number of http workers in separate threads. /// This method starts number of http workers in separate threads.
@@ -486,11 +470,12 @@ impl<H: IntoHttpHandler> HttpServer<H> {
/// sys.run(); // <- Run actix system, this method starts all async processes /// sys.run(); // <- Run actix system, this method starts all async processes
/// } /// }
/// ``` /// ```
pub fn start(self) -> Addr<Server> { pub fn start(mut self) -> Addr<Server> {
ssl::max_concurrent_ssl_connect(self.maxconnrate);
let mut srv = Server::new() let mut srv = Server::new()
.workers(self.threads) .workers(self.threads)
.maxconn(self.maxconn) .maxconn(self.maxconn)
.maxconnrate(self.maxconnrate)
.shutdown_timeout(self.shutdown_timeout); .shutdown_timeout(self.shutdown_timeout);
srv = if self.exit { srv.system_exit() } else { srv }; srv = if self.exit { srv.system_exit() } else { srv };
@@ -500,7 +485,31 @@ impl<H: IntoHttpHandler> HttpServer<H> {
srv srv
}; };
srv.service(self).start() let sockets = mem::replace(&mut self.sockets, Vec::new());
for socket in sockets {
let host = self
.host
.as_ref()
.map(|h| h.to_owned())
.unwrap_or_else(|| format!("{}", socket.addr));
let (secure, client_shutdown) = if socket.scheme == "https" {
(true, self.client_shutdown)
} else {
(false, 0)
};
srv = socket.handler.register(
srv,
socket.lst,
host,
socket.addr,
self.keep_alive,
secure,
self.client_timeout,
client_shutdown,
);
}
srv.start()
} }
/// Spawn new thread and start listening for incoming connections. /// Spawn new thread and start listening for incoming connections.
@@ -528,277 +537,35 @@ impl<H: IntoHttpHandler> HttpServer<H> {
self.start(); self.start();
sys.run(); sys.run();
} }
}
impl<H: IntoHttpHandler> HttpServer<H> { /// Register current http server as actix-net's server service
/// Start listening for incoming connections from a stream. pub fn register(self, mut srv: Server) -> Server {
/// for socket in self.sockets {
/// This method uses only one thread for handling incoming connections. let host = self
pub fn start_incoming<T, S>(self, stream: S, secure: bool) .host
where .as_ref()
S: Stream<Item = T, Error = io::Error> + Send + 'static, .map(|h| h.to_owned())
T: AsyncRead + AsyncWrite + Send + 'static, .unwrap_or_else(|| format!("{}", socket.addr));
{ let (secure, client_shutdown) = if socket.scheme == "https" {
// set server settings (true, self.client_shutdown)
let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap(); } else {
let srv_settings = ServerSettings::new(Some(addr), &self.host, secure); (false, 0)
let apps: Vec<_> = (*self.factory)() };
.into_iter() srv = socket.handler.register(
.map(|h| h.into_handler()) srv,
.collect(); socket.lst,
let settings = WorkerSettings::create( host,
apps, socket.addr,
self.keep_alive, self.keep_alive,
srv_settings, secure,
Connections::default(), self.client_timeout,
); client_shutdown,
);
// start server
HttpIncoming::create(move |ctx| {
ctx.add_message_stream(stream.map_err(|_| ()).map(move |t| Conn {
io: WrapperStream::new(t),
handler: Token::new(0),
token: Token::new(0),
peer: None,
}));
HttpIncoming { settings }
});
}
}
struct HttpIncoming<H: HttpHandler> {
settings: Rc<WorkerSettings<H>>,
}
impl<H> Actor for HttpIncoming<H>
where
H: HttpHandler,
{
type Context = Context<Self>;
}
impl<T, H> Handler<Conn<T>> for HttpIncoming<H>
where
T: IoStream,
H: HttpHandler,
{
type Result = ();
fn handle(&mut self, msg: Conn<T>, _: &mut Context<Self>) -> Self::Result {
Arbiter::spawn(HttpChannel::new(
Rc::clone(&self.settings),
msg.io,
msg.peer,
));
}
}
struct HttpServiceHandler<H>
where
H: HttpHandler + 'static,
{
settings: Rc<WorkerSettings<H>>,
handlers: Vec<Box<IoStreamHandler<H, net::TcpStream>>>,
tcp_ka: Option<time::Duration>,
}
impl<H: HttpHandler + 'static> HttpServiceHandler<H> {
fn new(
apps: Vec<H>, handlers: Vec<Box<IoStreamHandler<H, net::TcpStream>>>,
keep_alive: KeepAlive, settings: ServerSettings, conns: Connections,
) -> HttpServiceHandler<H> {
let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive {
Some(time::Duration::new(val as u64, 0))
} else {
None
};
let settings = WorkerSettings::create(apps, keep_alive, settings, conns);
HttpServiceHandler {
handlers,
tcp_ka,
settings,
} }
srv
} }
} }
impl<H> ServiceHandler for HttpServiceHandler<H>
where
H: HttpHandler + 'static,
{
fn handle(
&mut self, token: Token, io: net::TcpStream, peer: Option<net::SocketAddr>,
) {
if self.tcp_ka.is_some() && io.set_keepalive(self.tcp_ka).is_err() {
error!("Can not set socket keep-alive option");
}
self.handlers[token.0].handle(Rc::clone(&self.settings), io, peer);
}
fn shutdown(&self, force: bool) {
if force {
self.settings.head().traverse::<TcpStream, H>();
}
}
}
struct SimpleHandler<Io> {
addr: net::SocketAddr,
io: PhantomData<Io>,
}
impl<Io: IntoAsyncIo> Clone for SimpleHandler<Io> {
fn clone(&self) -> Self {
SimpleHandler {
addr: self.addr,
io: PhantomData,
}
}
}
impl<Io: IntoAsyncIo> SimpleHandler<Io> {
fn new(addr: net::SocketAddr) -> Self {
SimpleHandler {
addr,
io: PhantomData,
}
}
}
impl<H, Io> IoStreamHandler<H, Io> for SimpleHandler<Io>
where
H: HttpHandler,
Io: IntoAsyncIo + Send + 'static,
Io::Io: IoStream,
{
fn addr(&self) -> net::SocketAddr {
self.addr
}
fn clone(&self) -> Box<IoStreamHandler<H, Io>> {
Box::new(Clone::clone(self))
}
fn scheme(&self) -> &'static str {
"http"
}
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>) {
let mut io = match io.into_async_io() {
Ok(io) => io,
Err(err) => {
trace!("Failed to create async io: {}", err);
return;
}
};
let _ = io.set_nodelay(true);
current_thread::spawn(HttpChannel::new(h, io, peer));
}
}
struct StreamHandler<A, Io> {
acceptor: A,
addr: net::SocketAddr,
io: PhantomData<Io>,
}
impl<Io: IntoAsyncIo, A: AcceptorService<Io::Io>> StreamHandler<A, Io> {
fn new(addr: net::SocketAddr, acceptor: A) -> Self {
StreamHandler {
addr,
acceptor,
io: PhantomData,
}
}
}
impl<Io: IntoAsyncIo, A: AcceptorService<Io::Io>> Clone for StreamHandler<A, Io> {
fn clone(&self) -> Self {
StreamHandler {
addr: self.addr,
acceptor: self.acceptor.clone(),
io: PhantomData,
}
}
}
impl<H, Io, A> IoStreamHandler<H, Io> for StreamHandler<A, Io>
where
H: HttpHandler,
Io: IntoAsyncIo + Send + 'static,
Io::Io: IoStream,
A: AcceptorService<Io::Io> + Send + 'static,
{
fn addr(&self) -> net::SocketAddr {
self.addr
}
fn clone(&self) -> Box<IoStreamHandler<H, Io>> {
Box::new(Clone::clone(self))
}
fn scheme(&self) -> &'static str {
self.acceptor.scheme()
}
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>) {
let mut io = match io.into_async_io() {
Ok(io) => io,
Err(err) => {
trace!("Failed to create async io: {}", err);
return;
}
};
let _ = io.set_nodelay(true);
let rate = h.connection_rate();
current_thread::spawn(self.acceptor.accept(io).then(move |res| {
drop(rate);
match res {
Ok(io) => current_thread::spawn(HttpChannel::new(h, io, peer)),
Err(err) => trace!("Can not establish connection: {}", err),
}
Ok(())
}))
}
}
impl<H, Io: 'static> IoStreamHandler<H, Io> for Box<IoStreamHandler<H, Io>>
where
H: HttpHandler,
Io: IntoAsyncIo,
{
fn addr(&self) -> net::SocketAddr {
self.as_ref().addr()
}
fn clone(&self) -> Box<IoStreamHandler<H, Io>> {
self.as_ref().clone()
}
fn scheme(&self) -> &'static str {
self.as_ref().scheme()
}
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>) {
self.as_ref().handle(h, io, peer)
}
}
trait IoStreamHandler<H, Io>: Send
where
H: HttpHandler,
{
fn clone(&self) -> Box<IoStreamHandler<H, Io>>;
fn addr(&self) -> net::SocketAddr;
fn scheme(&self) -> &'static str;
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>);
}
fn create_tcp_listener( fn create_tcp_listener(
addr: net::SocketAddr, backlog: i32, addr: net::SocketAddr, backlog: i32,
) -> io::Result<net::TcpListener> { ) -> io::Result<net::TcpListener> {

69
src/server/incoming.rs Normal file
View File

@@ -0,0 +1,69 @@
//! Support for `Stream<Item=T::AsyncReady+AsyncWrite>`, deprecated!
use std::{io, net};
use actix::{Actor, Arbiter, AsyncContext, Context, Handler, Message};
use futures::{Future, Stream};
use tokio_io::{AsyncRead, AsyncWrite};
use super::channel::{HttpChannel, WrapperStream};
use super::handler::{HttpHandler, IntoHttpHandler};
use super::http::HttpServer;
use super::settings::{ServerSettings, ServiceConfig};
impl<T: AsyncRead + AsyncWrite + 'static> Message for WrapperStream<T> {
type Result = ();
}
impl<H, F> HttpServer<H, F>
where
H: IntoHttpHandler,
F: Fn() -> H + Send + Clone,
{
#[doc(hidden)]
#[deprecated(since = "0.7.8")]
/// Start listening for incoming connections from a stream.
///
/// This method uses only one thread for handling incoming connections.
pub fn start_incoming<T, S>(self, stream: S, secure: bool)
where
S: Stream<Item = T, Error = io::Error> + 'static,
T: AsyncRead + AsyncWrite + 'static,
{
// set server settings
let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap();
let apps = (self.factory)().into_handler();
let settings = ServiceConfig::new(
apps,
self.keep_alive,
self.client_timeout,
self.client_shutdown,
ServerSettings::new(addr, "127.0.0.1:8080", secure),
);
// start server
HttpIncoming::create(move |ctx| {
ctx.add_message_stream(stream.map_err(|_| ()).map(WrapperStream::new));
HttpIncoming { settings }
});
}
}
struct HttpIncoming<H: HttpHandler> {
settings: ServiceConfig<H>,
}
impl<H: HttpHandler> Actor for HttpIncoming<H> {
type Context = Context<Self>;
}
impl<T, H> Handler<WrapperStream<T>> for HttpIncoming<H>
where
T: AsyncRead + AsyncWrite,
H: HttpHandler,
{
type Result = ();
fn handle(&mut self, msg: WrapperStream<T>, _: &mut Context<Self>) -> Self::Result {
Arbiter::spawn(HttpChannel::new(self.settings.clone(), msg).map_err(|_| ()));
}
}

View File

@@ -1,5 +1,6 @@
use std::cell::{Cell, Ref, RefCell, RefMut}; use std::cell::{Cell, Ref, RefCell, RefMut};
use std::collections::VecDeque; use std::collections::VecDeque;
use std::fmt;
use std::net::SocketAddr; use std::net::SocketAddr;
use std::rc::Rc; use std::rc::Rc;
@@ -220,6 +221,26 @@ impl Request {
} }
} }
impl fmt::Debug for Request {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"\nRequest {:?} {}:{}",
self.version(),
self.method(),
self.path()
)?;
if let Some(q) = self.uri().query().as_ref() {
writeln!(f, " query: ?{:?}", q)?;
}
writeln!(f, " headers:")?;
for (key, val) in self.headers().iter() {
writeln!(f, " {:?}: {:?}", key, val)?;
}
Ok(())
}
}
pub(crate) struct RequestPool( pub(crate) struct RequestPool(
RefCell<VecDeque<Rc<InnerRequest>>>, RefCell<VecDeque<Rc<InnerRequest>>>,
RefCell<ServerSettings>, RefCell<ServerSettings>,

View File

@@ -12,7 +12,7 @@
//! to serve incoming HTTP requests. //! to serve incoming HTTP requests.
//! //!
//! As the server uses worker pool, the factory function is restricted to trait bounds //! As the server uses worker pool, the factory function is restricted to trait bounds
//! `Sync + Send + 'static` so that each worker would be able to accept Application //! `Send + Clone + 'static` so that each worker would be able to accept Application
//! without a need for synchronization. //! without a need for synchronization.
//! //!
//! If you wish to share part of state among all workers you should //! If you wish to share part of state among all workers you should
@@ -29,13 +29,9 @@
//! Each TLS implementation is provided with [AcceptorService](trait.AcceptorService.html) //! Each TLS implementation is provided with [AcceptorService](trait.AcceptorService.html)
//! that describes how HTTP Server accepts connections. //! that describes how HTTP Server accepts connections.
//! //!
//! For `bind` and `listen` there are corresponding `bind_with` and `listen_with` that accepts //! For `bind` and `listen` there are corresponding `bind_ssl|tls|rustls` and `listen_ssl|tls|rustls` that accepts
//! these services. //! these services.
//! //!
//! By default, acceptor would work with both HTTP2 and HTTP1 protocols.
//! But it can be controlled using [ServerFlags](struct.ServerFlags.html) which
//! can be supplied when creating `AcceptorService`.
//!
//! **NOTE:** `native-tls` doesn't support `HTTP2` yet //! **NOTE:** `native-tls` doesn't support `HTTP2` yet
//! //!
//! ## Signal handling and shutdown //! ## Signal handling and shutdown
@@ -87,17 +83,13 @@
//! // load ssl keys //! // load ssl keys
//! let config = load_ssl(); //! let config = load_ssl();
//! //!
//! // Create acceptor service for only HTTP1 protocol
//! // You can use ::new(config) to leave defaults
//! let acceptor = server::RustlsAcceptor::with_flags(config, actix_web::server::ServerFlags::HTTP1);
//!
//! // create and start server at once //! // create and start server at once
//! server::new(|| { //! server::new(|| {
//! App::new() //! App::new()
//! // register simple handler, handle all methods //! // register simple handler, handle all methods
//! .resource("/index.html", |r| r.f(index)) //! .resource("/index.html", |r| r.f(index))
//! })) //! }))
//! }).bind_with("127.0.0.1:8080", acceptor) //! }).bind_rustls("127.0.0.1:8443", config)
//! .unwrap() //! .unwrap()
//! .start(); //! .start();
//! //!
@@ -106,17 +98,19 @@
//! let _ = sys.run(); //! let _ = sys.run();
//!} //!}
//! ``` //! ```
use std::net::Shutdown; use std::net::{Shutdown, SocketAddr};
use std::rc::Rc; use std::rc::Rc;
use std::{io, net, time}; use std::{io, time};
use bytes::{BufMut, BytesMut}; use bytes::{BufMut, BytesMut};
use futures::{Async, Future, Poll}; use futures::{Async, Poll};
use tokio_io::{AsyncRead, AsyncWrite}; use tokio_io::{AsyncRead, AsyncWrite};
use tokio_reactor::Handle;
use tokio_tcp::TcpStream; use tokio_tcp::TcpStream;
pub(crate) mod accept; pub use actix_net::server::{PauseServer, ResumeServer, StopServer};
pub(crate) mod acceptor;
pub(crate) mod builder;
mod channel; mod channel;
mod error; mod error;
pub(crate) mod h1; pub(crate) mod h1;
@@ -124,35 +118,38 @@ pub(crate) mod h1decoder;
mod h1writer; mod h1writer;
mod h2; mod h2;
mod h2writer; mod h2writer;
mod handler;
pub(crate) mod helpers; pub(crate) mod helpers;
mod http; mod http;
pub(crate) mod incoming;
pub(crate) mod input; pub(crate) mod input;
pub(crate) mod message; pub(crate) mod message;
pub(crate) mod output; pub(crate) mod output;
mod server; pub(crate) mod service;
pub(crate) mod settings; pub(crate) mod settings;
mod ssl; mod ssl;
mod worker;
use actix::Message;
pub use self::message::Request;
pub use self::handler::*;
pub use self::http::HttpServer; pub use self::http::HttpServer;
#[doc(hidden)] pub use self::message::Request;
pub use self::server::{ pub use self::ssl::*;
ConnectionRateTag, ConnectionTag, Connections, Server, Service, ServiceHandler,
}; pub use self::error::{AcceptorError, HttpDispatchError};
pub use self::settings::ServerSettings; pub use self::settings::ServerSettings;
#[doc(hidden)] #[doc(hidden)]
pub use self::ssl::*; pub use self::acceptor::AcceptorTimeout;
#[doc(hidden)]
pub use self::settings::{ServiceConfig, ServiceConfigBuilder};
#[doc(hidden)]
pub use self::service::{H1Service, HttpService, StreamConfiguration};
#[doc(hidden)] #[doc(hidden)]
pub use self::helpers::write_content_length; pub use self::helpers::write_content_length;
use body::Binary; use body::Binary;
use error::Error;
use extensions::Extensions; use extensions::Extensions;
use header::ContentEncoding; use header::ContentEncoding;
use httpresponse::HttpResponse; use httpresponse::HttpResponse;
@@ -184,10 +181,9 @@ const HW_BUFFER_SIZE: usize = 32_768;
/// sys.run(); /// sys.run();
/// } /// }
/// ``` /// ```
pub fn new<F, U, H>(factory: F) -> HttpServer<H> pub fn new<F, H>(factory: F) -> HttpServer<H, F>
where where
F: Fn() -> U + Sync + Send + 'static, F: Fn() -> H + Send + Clone + 'static,
U: IntoIterator<Item = H> + 'static,
H: IntoHttpHandler + 'static, H: IntoHttpHandler + 'static,
{ {
HttpServer::new(factory) HttpServer::new(factory)
@@ -233,124 +229,6 @@ impl From<Option<usize>> for KeepAlive {
} }
} }
/// Pause accepting incoming connections
///
/// If socket contains some pending connection, they might be dropped.
/// All opened connection remains active.
#[derive(Message)]
pub struct PauseServer;
/// Resume accepting incoming connections
#[derive(Message)]
pub struct ResumeServer;
/// Stop incoming connection processing, stop all workers and exit.
///
/// If server starts with `spawn()` method, then spawned thread get terminated.
pub struct StopServer {
/// Whether to try and shut down gracefully
pub graceful: bool,
}
impl Message for StopServer {
type Result = Result<(), ()>;
}
/// Socket id token
#[doc(hidden)]
#[derive(Clone, Copy)]
pub struct Token(usize);
impl Token {
pub(crate) fn new(val: usize) -> Token {
Token(val)
}
}
/// Low level http request handler
#[allow(unused_variables)]
pub trait HttpHandler: 'static {
/// Request handling task
type Task: HttpHandlerTask;
/// Handle request
fn handle(&self, req: Request) -> Result<Self::Task, Request>;
}
impl HttpHandler for Box<HttpHandler<Task = Box<HttpHandlerTask>>> {
type Task = Box<HttpHandlerTask>;
fn handle(&self, req: Request) -> Result<Box<HttpHandlerTask>, Request> {
self.as_ref().handle(req)
}
}
/// Low level http request handler
pub trait HttpHandlerTask {
/// Poll task, this method is used before or after *io* object is available
fn poll_completed(&mut self) -> Poll<(), Error> {
Ok(Async::Ready(()))
}
/// Poll task when *io* object is available
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error>;
/// Connection is disconnected
fn disconnected(&mut self) {}
}
impl HttpHandlerTask for Box<HttpHandlerTask> {
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error> {
self.as_mut().poll_io(io)
}
}
/// Conversion helper trait
pub trait IntoHttpHandler {
/// The associated type which is result of conversion.
type Handler: HttpHandler;
/// Convert into `HttpHandler` object.
fn into_handler(self) -> Self::Handler;
}
impl<T: HttpHandler> IntoHttpHandler for T {
type Handler = T;
fn into_handler(self) -> Self::Handler {
self
}
}
pub(crate) trait IntoAsyncIo {
type Io: AsyncRead + AsyncWrite;
fn into_async_io(self) -> Result<Self::Io, io::Error>;
}
impl IntoAsyncIo for net::TcpStream {
type Io = TcpStream;
fn into_async_io(self) -> Result<Self::Io, io::Error> {
TcpStream::from_std(self, &Handle::default())
}
}
#[doc(hidden)]
/// Trait implemented by types that could accept incomming socket connections.
pub trait AcceptorService<Io: AsyncRead + AsyncWrite>: Clone {
/// Established connection type
type Accepted: IoStream;
/// Future describes async accept process.
type Future: Future<Item = Self::Accepted, Error = io::Error> + 'static;
/// Establish new connection
fn accept(&self, io: Io) -> Self::Future;
/// Scheme
fn scheme(&self) -> &'static str;
}
#[doc(hidden)] #[doc(hidden)]
#[derive(Debug)] #[derive(Debug)]
pub enum WriterState { pub enum WriterState {
@@ -386,37 +264,47 @@ pub trait Writer {
pub trait IoStream: AsyncRead + AsyncWrite + 'static { pub trait IoStream: AsyncRead + AsyncWrite + 'static {
fn shutdown(&mut self, how: Shutdown) -> io::Result<()>; fn shutdown(&mut self, how: Shutdown) -> io::Result<()>;
/// Returns the socket address of the remote peer of this TCP connection.
fn peer_addr(&self) -> Option<SocketAddr> {
None
}
/// Sets the value of the TCP_NODELAY option on this socket.
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()>; fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()>;
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()>; fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()>;
fn read_available(&mut self, buf: &mut BytesMut) -> Poll<bool, io::Error> { fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()>;
fn read_available(&mut self, buf: &mut BytesMut) -> Poll<(bool, bool), io::Error> {
let mut read_some = false; let mut read_some = false;
loop { loop {
if buf.remaining_mut() < LW_BUFFER_SIZE { if buf.remaining_mut() < LW_BUFFER_SIZE {
buf.reserve(HW_BUFFER_SIZE); buf.reserve(HW_BUFFER_SIZE);
} }
unsafe {
match self.read(buf.bytes_mut()) { let read = unsafe { self.read(buf.bytes_mut()) };
Ok(n) => { match read {
if n == 0 { Ok(n) => {
return Ok(Async::Ready(!read_some)); if n == 0 {
} else { return Ok(Async::Ready((read_some, true)));
read_some = true; } else {
read_some = true;
unsafe {
buf.advance_mut(n); buf.advance_mut(n);
} }
} }
Err(e) => { }
return if e.kind() == io::ErrorKind::WouldBlock { Err(e) => {
if read_some { return if e.kind() == io::ErrorKind::WouldBlock {
Ok(Async::Ready(false)) if read_some {
} else { Ok(Async::Ready((read_some, false)))
Ok(Async::NotReady)
}
} else { } else {
Err(e) Ok(Async::NotReady)
}; }
} } else {
Err(e)
};
} }
} }
} }
@@ -444,6 +332,11 @@ impl IoStream for ::tokio_uds::UnixStream {
fn set_linger(&mut self, _dur: Option<time::Duration>) -> io::Result<()> { fn set_linger(&mut self, _dur: Option<time::Duration>) -> io::Result<()> {
Ok(()) Ok(())
} }
#[inline]
fn set_keepalive(&mut self, _nodelay: bool) -> io::Result<()> {
Ok(())
}
} }
impl IoStream for TcpStream { impl IoStream for TcpStream {
@@ -452,6 +345,11 @@ impl IoStream for TcpStream {
TcpStream::shutdown(self, how) TcpStream::shutdown(self, how)
} }
#[inline]
fn peer_addr(&self) -> Option<SocketAddr> {
TcpStream::peer_addr(self).ok()
}
#[inline] #[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
TcpStream::set_nodelay(self, nodelay) TcpStream::set_nodelay(self, nodelay)
@@ -461,4 +359,9 @@ impl IoStream for TcpStream {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> { fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
TcpStream::set_linger(self, dur) TcpStream::set_linger(self, dur)
} }
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
TcpStream::set_keepalive(self, dur)
}
} }

View File

@@ -11,7 +11,7 @@ use flate2::write::{GzEncoder, ZlibEncoder};
#[cfg(feature = "flate2")] #[cfg(feature = "flate2")]
use flate2::Compression; use flate2::Compression;
use http::header::{ACCEPT_ENCODING, CONTENT_LENGTH}; use http::header::{ACCEPT_ENCODING, CONTENT_LENGTH};
use http::Version; use http::{StatusCode, Version};
use super::message::InnerRequest; use super::message::InnerRequest;
use body::{Binary, Body}; use body::{Binary, Body};
@@ -151,10 +151,9 @@ impl Output {
let version = resp.version().unwrap_or_else(|| req.version); let version = resp.version().unwrap_or_else(|| req.version);
let mut len = 0; let mut len = 0;
#[cfg_attr(feature = "cargo-clippy", allow(match_ref_pats))]
let has_body = match resp.body() { let has_body = match resp.body() {
&Body::Empty => false, Body::Empty => false,
&Body::Binary(ref bin) => { Body::Binary(ref bin) => {
len = bin.len(); len = bin.len();
!(response_encoding == ContentEncoding::Auto && len < 96) !(response_encoding == ContentEncoding::Auto && len < 96)
} }
@@ -190,16 +189,19 @@ impl Output {
#[cfg(not(any(feature = "brotli", feature = "flate2")))] #[cfg(not(any(feature = "brotli", feature = "flate2")))]
let mut encoding = ContentEncoding::Identity; let mut encoding = ContentEncoding::Identity;
#[cfg_attr(feature = "cargo-clippy", allow(match_ref_pats))]
let transfer = match resp.body() { let transfer = match resp.body() {
&Body::Empty => { Body::Empty => {
if !info.head { info.length = match resp.status() {
info.length = ResponseLength::Zero; StatusCode::NO_CONTENT
} | StatusCode::CONTINUE
| StatusCode::SWITCHING_PROTOCOLS
| StatusCode::PROCESSING => ResponseLength::None,
_ => ResponseLength::Zero,
};
*self = Output::Empty(buf); *self = Output::Empty(buf);
return; return;
} }
&Body::Binary(_) => { Body::Binary(_) => {
#[cfg(any(feature = "brotli", feature = "flate2"))] #[cfg(any(feature = "brotli", feature = "flate2"))]
{ {
if !(encoding == ContentEncoding::Identity if !(encoding == ContentEncoding::Identity
@@ -244,7 +246,7 @@ impl Output {
} }
return; return;
} }
&Body::Streaming(_) | &Body::Actor(_) => { Body::Streaming(_) | Body::Actor(_) => {
if resp.upgrade() { if resp.upgrade() {
if version == Version::HTTP_2 { if version == Version::HTTP_2 {
error!("Connection upgrade is forbidden for HTTP/2"); error!("Connection upgrade is forbidden for HTTP/2");
@@ -441,7 +443,7 @@ impl ContentEncoder {
} }
} }
#[cfg_attr(feature = "cargo-clippy", allow(inline_always))] #[cfg_attr(feature = "cargo-clippy", allow(clippy::inline_always))]
#[inline(always)] #[inline(always)]
pub fn write_eof(&mut self) -> Result<bool, io::Error> { pub fn write_eof(&mut self) -> Result<bool, io::Error> {
let encoder = let encoder =
@@ -483,7 +485,7 @@ impl ContentEncoder {
} }
} }
#[cfg_attr(feature = "cargo-clippy", allow(inline_always))] #[cfg_attr(feature = "cargo-clippy", allow(clippy::inline_always))]
#[inline(always)] #[inline(always)]
pub fn write(&mut self, data: &[u8]) -> Result<(), io::Error> { pub fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
match *self { match *self {

View File

@@ -1,528 +0,0 @@
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use std::time::Duration;
use std::{mem, net};
use futures::sync::{mpsc, mpsc::unbounded};
use futures::{Future, Sink, Stream};
use num_cpus;
use actix::{
fut, signal, Actor, ActorFuture, Addr, Arbiter, AsyncContext, Context, Handler,
Response, StreamHandler, System, WrapFuture,
};
use super::accept::{AcceptLoop, AcceptNotify, Command};
use super::worker::{Conn, StopWorker, Worker, WorkerClient};
use super::{PauseServer, ResumeServer, StopServer, Token};
#[doc(hidden)]
/// Describes service that could be used
/// with [Server](struct.Server.html)
pub trait Service: Send + 'static {
    /// Clone service
    ///
    /// Hand-rolled clone method (instead of a `Clone` supertrait) so the
    /// trait stays object-safe; returns a fresh boxed trait object.
    fn clone(&self) -> Box<Service>;

    /// Create service handler for this service
    ///
    /// `conn` carries the shared connection counters so every handler
    /// participates in max-connection accounting.
    fn create(&self, conn: Connections) -> Box<ServiceHandler>;
}

// Lets a `Box<Service>` be used anywhere a `Service` is expected;
// both methods simply delegate to the boxed value.
impl Service for Box<Service> {
    fn clone(&self) -> Box<Service> {
        self.as_ref().clone()
    }

    fn create(&self, conn: Connections) -> Box<ServiceHandler> {
        self.as_ref().create(conn)
    }
}
#[doc(hidden)]
/// Describes the way service handles incoming
/// TCP connections.
pub trait ServiceHandler {
    /// Handle incoming stream
    ///
    /// `token` identifies which bound listener accepted the stream;
    /// `peer` is the remote address when it could be obtained.
    fn handle(
        &mut self, token: Token, io: net::TcpStream, peer: Option<net::SocketAddr>,
    );

    /// Shutdown open handlers
    ///
    /// Default is a no-op; the bool flag is the graceful/forced indicator.
    fn shutdown(&self, _: bool) {}
}

// Messages sent from the accept thread back to the `Server` actor.
pub(crate) enum ServerCommand {
    // a worker thread terminated; payload is the worker index
    WorkerDied(usize),
}
/// Generic server
#[doc(hidden)]
pub struct Server {
    // number of worker threads to start
    threads: usize,
    // live workers as (worker index, actor address) pairs
    workers: Vec<(usize, Addr<Worker>)>,
    // registered services; each worker gets a handler from every service
    services: Vec<Box<Service>>,
    // bound listeners, one inner Vec per registered service
    sockets: Vec<Vec<(Token, net::TcpListener)>>,
    // accept loop that runs in its own thread
    accept: AcceptLoop,
    // when true, stop the whole actix system once the server stops
    exit: bool,
    // graceful-shutdown timeout in seconds
    shutdown_timeout: u16,
    // alternative `ProcessSignals` actor address, if configured
    signals: Option<Addr<signal::ProcessSignals>>,
    // when true, do not subscribe to OS signals at all
    no_signals: bool,
    // per-worker max concurrent connections
    maxconn: usize,
    // per-worker max connections in the establishment (e.g. TLS handshake) phase
    maxconnrate: usize,
}

impl Default for Server {
    fn default() -> Self {
        Self::new()
    }
}
impl Server {
    /// Create new Server instance
    pub fn new() -> Server {
        Server {
            // default worker count: one per logical CPU
            threads: num_cpus::get(),
            workers: Vec::new(),
            services: Vec::new(),
            sockets: Vec::new(),
            accept: AcceptLoop::new(),
            exit: false,
            shutdown_timeout: 30,
            signals: None,
            no_signals: false,
            maxconn: 102_400,
            maxconnrate: 256,
        }
    }

    /// Set number of workers to start.
    ///
    /// By default http server uses number of available logical cpu as threads
    /// count.
    pub fn workers(mut self, num: usize) -> Self {
        self.threads = num;
        self
    }

    /// Sets the maximum per-worker number of concurrent connections.
    ///
    /// All socket listeners will stop accepting connections when this limit is reached
    /// for each worker.
    ///
    /// By default max connections is set to a 100k.
    pub fn maxconn(mut self, num: usize) -> Self {
        self.maxconn = num;
        self
    }

    /// Sets the maximum per-worker concurrent connection establish process.
    ///
    /// All listeners will stop accepting connections when this limit is reached. It
    /// can be used to limit the global SSL CPU usage.
    ///
    /// By default max connections is set to a 256.
    pub fn maxconnrate(mut self, num: usize) -> Self {
        self.maxconnrate = num;
        self
    }

    /// Stop actix system.
    ///
    /// `SystemExit` message stops currently running system.
    pub fn system_exit(mut self) -> Self {
        self.exit = true;
        self
    }

    #[doc(hidden)]
    /// Set alternative address for `ProcessSignals` actor.
    pub fn signals(mut self, addr: Addr<signal::ProcessSignals>) -> Self {
        self.signals = Some(addr);
        self
    }

    /// Disable signal handling
    pub fn disable_signals(mut self) -> Self {
        self.no_signals = true;
        self
    }

    /// Timeout for graceful workers shutdown.
    ///
    /// After receiving a stop signal, workers have this much time to finish
    /// serving requests. Workers still alive after the timeout are force
    /// dropped.
    ///
    /// By default shutdown timeout sets to 30 seconds.
    pub fn shutdown_timeout(mut self, sec: u16) -> Self {
        self.shutdown_timeout = sec;
        self
    }

    /// Add new service to server
    ///
    /// The conversion yields the service plus the listeners it already bound;
    /// both are stored at matching indices in `services` / `sockets`.
    pub fn service<T>(mut self, srv: T) -> Self
    where
        T: Into<(Box<Service>, Vec<(Token, net::TcpListener)>)>,
    {
        let (srv, sockets) = srv.into();
        self.services.push(srv);
        self.sockets.push(sockets);
        self
    }

    /// Spawn new thread and start listening for incoming connections.
    ///
    /// This method spawns new thread and starts new actix system. Other than
    /// that it is similar to `start()` method. This method blocks.
    ///
    /// This methods panics if no socket addresses get bound.
    ///
    /// ```rust,ignore
    /// # extern crate futures;
    /// # extern crate actix_web;
    /// # use futures::Future;
    /// use actix_web::*;
    ///
    /// fn main() {
    ///     Server::new()
    ///         .service(
    ///             HttpServer::new(|| App::new().resource("/", |r| r.h(|_| HttpResponse::Ok())))
    ///                 .bind("127.0.0.1:0")
    ///                 .expect("Can not bind to 127.0.0.1:0"))
    ///         .run();
    /// }
    /// ```
    pub fn run(self) {
        let sys = System::new("http-server");
        self.start();
        sys.run();
    }

    /// Starts Server Actor and returns its address
    pub fn start(mut self) -> Addr<Server> {
        if self.sockets.is_empty() {
            panic!("Service should have at least one bound socket");
        } else {
            info!("Starting {} http workers", self.threads);

            // start workers
            let mut workers = Vec::new();
            for idx in 0..self.threads {
                let (addr, worker) = self.start_worker(idx, self.accept.get_notify());
                workers.push(worker);
                self.workers.push((idx, addr));
            }

            // start accept thread
            for sock in &self.sockets {
                for s in sock.iter() {
                    info!("Starting server on http://{:?}", s.1.local_addr().ok());
                }
            }
            // hand the listeners over to the accept loop; `rx` streams
            // ServerCommand messages (e.g. worker deaths) back to this actor
            let rx = self
                .accept
                .start(mem::replace(&mut self.sockets, Vec::new()), workers);

            // start http server actor
            let signals = self.subscribe_to_signals();
            let addr = Actor::create(move |ctx| {
                ctx.add_stream(rx);
                self
            });
            if let Some(signals) = signals {
                signals.do_send(signal::Subscribe(addr.clone().recipient()))
            }
            addr
        }
    }

    // subscribe to os signals
    //
    // Returns the explicitly configured signals actor if one was set,
    // otherwise the system-registry instance; `None` when signal handling
    // is disabled.
    fn subscribe_to_signals(&self) -> Option<Addr<signal::ProcessSignals>> {
        if !self.no_signals {
            if let Some(ref signals) = self.signals {
                Some(signals.clone())
            } else {
                Some(System::current().registry().get::<signal::ProcessSignals>())
            }
        } else {
            None
        }
    }

    // Spawn one worker on its own arbiter (thread) and return both its actor
    // address and the client handle used by the accept loop to feed it.
    fn start_worker(
        &self, idx: usize, notify: AcceptNotify,
    ) -> (Addr<Worker>, WorkerClient) {
        // channel through which the accept loop hands connections to this worker
        let (tx, rx) = unbounded::<Conn<net::TcpStream>>();
        let conns = Connections::new(notify, self.maxconn, self.maxconnrate);
        let worker = WorkerClient::new(idx, tx, conns.clone());
        // Service::clone (trait method), not Clone::clone — trait objects are
        // cloned explicitly so each worker owns its own service instances
        let services: Vec<_> = self.services.iter().map(|v| v.clone()).collect();

        let addr = Arbiter::start(move |ctx: &mut Context<_>| {
            ctx.add_message_stream(rx);
            let handlers: Vec<_> = services
                .into_iter()
                .map(|s| s.create(conns.clone()))
                .collect();
            Worker::new(conns, handlers)
        });

        (addr, worker)
    }
}
impl Actor for Server {
    type Context = Context<Self>;
}

/// Signals support
/// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system
/// message to `System` actor.
impl Handler<signal::Signal> for Server {
    type Result = ();

    fn handle(&mut self, msg: signal::Signal, ctx: &mut Context<Self>) {
        // Map each fatal signal to the graceful-shutdown flag; every other
        // signal is ignored.
        let graceful = match msg.0 {
            signal::SignalType::Int => {
                info!("SIGINT received, exiting");
                Some(false)
            }
            signal::SignalType::Term => {
                info!("SIGTERM received, stopping");
                Some(true)
            }
            signal::SignalType::Quit => {
                info!("SIGQUIT received, exiting");
                Some(false)
            }
            _ => None,
        };

        if let Some(graceful) = graceful {
            self.exit = true;
            Handler::<StopServer>::handle(self, StopServer { graceful }, ctx);
        }
    }
}
// Pause accepting new connections: forwarded to the accept-loop thread.
impl Handler<PauseServer> for Server {
    type Result = ();

    fn handle(&mut self, _: PauseServer, _: &mut Context<Self>) {
        self.accept.send(Command::Pause);
    }
}

// Resume accepting new connections: forwarded to the accept-loop thread.
impl Handler<ResumeServer> for Server {
    type Result = ();

    fn handle(&mut self, _: ResumeServer, _: &mut Context<Self>) {
        self.accept.send(Command::Resume);
    }
}
// Stop the accept loop, then stop every worker; the reply future resolves
// once the last worker has acknowledged its StopWorker message.
impl Handler<StopServer> for Server {
    type Result = Response<(), ()>;

    fn handle(&mut self, msg: StopServer, ctx: &mut Context<Self>) -> Self::Result {
        // stop accept thread
        self.accept.send(Command::Stop);

        // stop workers
        // one-shot style channel: a single () is sent when all workers are done
        let (tx, rx) = mpsc::channel(1);

        // graceful stop carries the configured timeout; forced stop carries None
        let dur = if msg.graceful {
            Some(Duration::new(u64::from(self.shutdown_timeout), 0))
        } else {
            None
        };
        for worker in &self.workers {
            let tx2 = tx.clone();
            ctx.spawn(
                worker
                    .1
                    .send(StopWorker { graceful: dur })
                    .into_actor(self)
                    .then(move |_, slf, ctx| {
                        // NOTE(review): pop() removes an arbitrary entry, not
                        // necessarily the worker that just replied — the Vec is
                        // only used here as a countdown until empty.
                        slf.workers.pop();
                        if slf.workers.is_empty() {
                            let _ = tx2.send(());

                            // we need to stop system if server was spawned
                            if slf.exit {
                                ctx.run_later(Duration::from_millis(300), |_, _| {
                                    System::current().stop();
                                });
                            }
                        }
                        fut::ok(())
                    }),
            );
        }

        if !self.workers.is_empty() {
            // reply asynchronously once the countdown above sends on tx
            Response::async(rx.into_future().map(|_| ()).map_err(|_| ()))
        } else {
            // we need to stop system if server was spawned
            if self.exit {
                ctx.run_later(Duration::from_millis(300), |_, _| {
                    System::current().stop();
                });
            }
            Response::reply(Ok(()))
        }
    }
}
/// Commands from accept threads
impl StreamHandler<ServerCommand, ()> for Server {
    // Keep the actor alive even after the command stream ends.
    fn finished(&mut self, _: &mut Context<Self>) {}

    fn handle(&mut self, msg: ServerCommand, _: &mut Context<Self>) {
        match msg {
            ServerCommand::WorkerDied(idx) => {
                // Remove the dead worker's registry entry, if still present.
                let pos = self.workers.iter().position(|entry| entry.0 == idx);
                if let Some(pos) = pos {
                    self.workers.swap_remove(pos);
                    error!("Worker has died {:?}, restarting", idx);

                    // Choose the smallest index >= current worker count that
                    // is not already taken by a live worker.
                    let mut new_idx = self.workers.len();
                    while self.workers.iter().any(|entry| entry.0 == new_idx) {
                        new_idx += 1;
                    }

                    let (addr, worker) =
                        self.start_worker(new_idx, self.accept.get_notify());
                    self.workers.push((new_idx, addr));
                    self.accept.send(Command::Worker(worker));
                }
            }
        }
    }
}
#[derive(Clone, Default)]
///Contains information about connection.
pub struct Connections(Arc<ConnectionsInner>);

impl Connections {
    fn new(notify: AcceptNotify, maxconn: usize, maxconnrate: usize) -> Self {
        // Low-water marks sit 10 below each limit (floored at zero) so the
        // accept loop is only re-notified once usage drops back under them.
        let inner = ConnectionsInner {
            notify,
            maxconn,
            maxconnrate,
            maxconn_low: maxconn.saturating_sub(10),
            maxconnrate_low: maxconnrate.saturating_sub(10),
            conn: AtomicUsize::new(0),
            connrate: AtomicUsize::new(0),
        };
        Connections(Arc::new(inner))
    }

    // True while both the connection count and the handshake-rate count are
    // under their configured limits.
    pub(crate) fn available(&self) -> bool {
        self.0.available()
    }

    // Current number of open connections.
    pub(crate) fn num_connections(&self) -> usize {
        self.0.conn.load(Ordering::Relaxed)
    }

    /// Report opened connection
    pub fn connection(&self) -> ConnectionTag {
        ConnectionTag::new(Arc::clone(&self.0))
    }

    /// Report rate connection, rate is usually ssl handshake
    pub fn connection_rate(&self) -> ConnectionRateTag {
        ConnectionRateTag::new(Arc::clone(&self.0))
    }
}
#[derive(Default)]
// Shared counters behind `Connections`; one instance per worker, shared
// between all the tags it hands out.
struct ConnectionsInner {
    notify: AcceptNotify,
    conn: AtomicUsize,
    connrate: AtomicUsize,
    maxconn: usize,
    maxconnrate: usize,
    maxconn_low: usize,
    maxconnrate_low: usize,
}

impl ConnectionsInner {
    // Capacity check: both counters must be strictly under their limits.
    fn available(&self) -> bool {
        self.connrate.load(Ordering::Relaxed) < self.maxconnrate
            && self.conn.load(Ordering::Relaxed) < self.maxconn
    }

    // Wake the accept loop only while the count is inside the
    // (low-water, limit] window, i.e. just came back under the limit.
    fn notify_maxconn(&self, maxconn: usize) {
        let in_window = maxconn > self.maxconn_low && maxconn <= self.maxconn;
        if in_window {
            self.notify.notify();
        }
    }

    fn notify_maxconnrate(&self, connrate: usize) {
        let in_window = connrate > self.maxconnrate_low && connrate <= self.maxconnrate;
        if in_window {
            self.notify.notify();
        }
    }
}
/// Type responsible for max connection stat.
///
/// Max connections stat get updated on drop.
pub struct ConnectionTag(Arc<ConnectionsInner>);

impl ConnectionTag {
    fn new(shared: Arc<ConnectionsInner>) -> Self {
        shared.conn.fetch_add(1, Ordering::Relaxed);
        ConnectionTag(shared)
    }
}

impl Drop for ConnectionTag {
    fn drop(&mut self) {
        // fetch_sub returns the value *before* the decrement; the notify
        // helper checks it against the low-water window.
        let prev = self.0.conn.fetch_sub(1, Ordering::Relaxed);
        self.0.notify_maxconn(prev);
    }
}

/// Type responsible for max connection rate stat.
///
/// Max connections rate stat get updated on drop.
pub struct ConnectionRateTag(Arc<ConnectionsInner>);

impl ConnectionRateTag {
    fn new(shared: Arc<ConnectionsInner>) -> Self {
        shared.connrate.fetch_add(1, Ordering::Relaxed);
        ConnectionRateTag(shared)
    }
}

impl Drop for ConnectionRateTag {
    fn drop(&mut self) {
        // Value before the decrement, as above.
        let prev = self.0.connrate.fetch_sub(1, Ordering::Relaxed);
        self.0.notify_maxconnrate(prev);
    }
}

272
src/server/service.rs Normal file
View File

@@ -0,0 +1,272 @@
use std::marker::PhantomData;
use std::time::Duration;
use actix_net::service::{NewService, Service};
use futures::future::{ok, FutureResult};
use futures::{Async, Poll};
use super::channel::{H1Channel, HttpChannel};
use super::error::HttpDispatchError;
use super::handler::HttpHandler;
use super::settings::ServiceConfig;
use super::IoStream;
/// `NewService` implementation for HTTP1/HTTP2 transports
pub struct HttpService<H, Io>
where
    H: HttpHandler,
    Io: IoStream,
{
    settings: ServiceConfig<H>,
    // marker: the stream type is only used in the service's signature
    _t: PhantomData<Io>,
}

impl<H, Io> HttpService<H, Io>
where
    H: HttpHandler,
    Io: IoStream,
{
    /// Create new `HttpService` instance.
    pub fn new(settings: ServiceConfig<H>) -> Self {
        HttpService { settings, _t: PhantomData }
    }
}

impl<H, Io> NewService for HttpService<H, Io>
where
    H: HttpHandler,
    Io: IoStream,
{
    type Request = Io;
    type Response = ();
    type Error = HttpDispatchError;
    type InitError = ();
    type Service = HttpServiceHandler<H, Io>;
    type Future = FutureResult<Self::Service, Self::InitError>;

    // Each new service instance shares the same (ref-counted) config.
    fn new_service(&self) -> Self::Future {
        let handler = HttpServiceHandler::new(self.settings.clone());
        ok(handler)
    }
}

// The per-connection-factory produced by `HttpService`.
pub struct HttpServiceHandler<H, Io>
where
    H: HttpHandler,
    Io: IoStream,
{
    settings: ServiceConfig<H>,
    _t: PhantomData<Io>,
}

impl<H, Io> HttpServiceHandler<H, Io>
where
    H: HttpHandler,
    Io: IoStream,
{
    fn new(settings: ServiceConfig<H>) -> HttpServiceHandler<H, Io> {
        HttpServiceHandler { settings, _t: PhantomData }
    }
}

impl<H, Io> Service for HttpServiceHandler<H, Io>
where
    H: HttpHandler,
    Io: IoStream,
{
    type Request = Io;
    type Response = ();
    type Error = HttpDispatchError;
    type Future = HttpChannel<Io, H>;

    // Always ready: dispatch creates a channel per incoming stream.
    fn poll_ready(&mut self) -> Poll<(), Self::Error> {
        Ok(Async::Ready(()))
    }

    // Wrap the accepted stream in an HTTP1/HTTP2 dispatcher future.
    fn call(&mut self, req: Self::Request) -> Self::Future {
        HttpChannel::new(self.settings.clone(), req)
    }
}
/// `NewService` implementation for HTTP1 transport
pub struct H1Service<H, Io>
where
    H: HttpHandler,
    Io: IoStream,
{
    settings: ServiceConfig<H>,
    // marker: the stream type only appears in the service's signature
    _t: PhantomData<Io>,
}

impl<H, Io> H1Service<H, Io>
where
    H: HttpHandler,
    Io: IoStream,
{
    /// Create new `HttpService` instance.
    pub fn new(settings: ServiceConfig<H>) -> Self {
        H1Service { settings, _t: PhantomData }
    }
}

impl<H, Io> NewService for H1Service<H, Io>
where
    H: HttpHandler,
    Io: IoStream,
{
    type Request = Io;
    type Response = ();
    type Error = HttpDispatchError;
    type InitError = ();
    type Service = H1ServiceHandler<H, Io>;
    type Future = FutureResult<Self::Service, Self::InitError>;

    // Each new service instance shares the same (ref-counted) config.
    fn new_service(&self) -> Self::Future {
        let handler = H1ServiceHandler::new(self.settings.clone());
        ok(handler)
    }
}

/// `Service` implementation for HTTP1 transport
pub struct H1ServiceHandler<H, Io>
where
    H: HttpHandler,
    Io: IoStream,
{
    settings: ServiceConfig<H>,
    _t: PhantomData<Io>,
}

impl<H, Io> H1ServiceHandler<H, Io>
where
    H: HttpHandler,
    Io: IoStream,
{
    fn new(settings: ServiceConfig<H>) -> H1ServiceHandler<H, Io> {
        H1ServiceHandler { settings, _t: PhantomData }
    }
}

impl<H, Io> Service for H1ServiceHandler<H, Io>
where
    H: HttpHandler,
    Io: IoStream,
{
    type Request = Io;
    type Response = ();
    type Error = HttpDispatchError;
    type Future = H1Channel<Io, H>;

    // Always ready: dispatch creates a channel per incoming stream.
    fn poll_ready(&mut self) -> Poll<(), Self::Error> {
        Ok(Async::Ready(()))
    }

    // Wrap the accepted stream in an HTTP/1-only dispatcher future.
    fn call(&mut self, req: Self::Request) -> Self::Future {
        H1Channel::new(self.settings.clone(), req)
    }
}
/// `NewService` implementation for stream configuration service
///
/// Stream configuration service allows to change some socket level
/// parameters. for example `tcp nodelay` or `tcp keep-alive`.
pub struct StreamConfiguration<T, E> {
    // `None` means "leave the socket's TCP_NODELAY setting untouched"
    no_delay: Option<bool>,
    // outer `None` = leave keep-alive untouched; inner Option is the
    // keep-alive duration to apply (None disables keep-alive)
    tcp_ka: Option<Option<Duration>>,
    _t: PhantomData<(T, E)>,
}

impl<T, E> Default for StreamConfiguration<T, E> {
    fn default() -> Self {
        Self::new()
    }
}

impl<T, E> StreamConfiguration<T, E> {
    /// Create new `StreamConfigurationService` instance.
    pub fn new() -> Self {
        StreamConfiguration {
            no_delay: None,
            tcp_ka: None,
            _t: PhantomData,
        }
    }

    /// Sets the value of the `TCP_NODELAY` option on this socket.
    pub fn nodelay(mut self, nodelay: bool) -> Self {
        self.no_delay = Some(nodelay);
        self
    }

    /// Sets whether keepalive messages are enabled to be sent on this socket.
    pub fn tcp_keepalive(mut self, keepalive: Option<Duration>) -> Self {
        self.tcp_ka = Some(keepalive);
        self
    }
}

impl<T: IoStream, E> NewService for StreamConfiguration<T, E> {
    type Request = T;
    type Response = T;
    type Error = E;
    type InitError = ();
    type Service = StreamConfigurationService<T, E>;
    type Future = FutureResult<Self::Service, Self::InitError>;

    // The service is stateless apart from the copied settings.
    fn new_service(&self) -> Self::Future {
        let service = StreamConfigurationService {
            no_delay: self.no_delay,
            tcp_ka: self.tcp_ka,
            _t: PhantomData,
        };
        ok(service)
    }
}

/// Stream configuration service
///
/// Stream configuration service allows to change some socket level
/// parameters. for example `tcp nodelay` or `tcp keep-alive`.
pub struct StreamConfigurationService<T, E> {
    no_delay: Option<bool>,
    tcp_ka: Option<Option<Duration>>,
    _t: PhantomData<(T, E)>,
}

impl<T, E> Service for StreamConfigurationService<T, E>
where
    T: IoStream,
{
    type Request = T;
    type Response = T;
    type Error = E;
    type Future = FutureResult<T, E>;

    fn poll_ready(&mut self) -> Poll<(), Self::Error> {
        Ok(Async::Ready(()))
    }

    // Apply the configured socket options, logging (but not failing) when a
    // setsockopt call is rejected, then pass the stream through unchanged.
    fn call(&mut self, mut req: Self::Request) -> Self::Future {
        if let Some(flag) = self.no_delay {
            if req.set_nodelay(flag).is_err() {
                error!("Can not set socket no-delay option");
            }
        }

        if let Some(ka) = self.tcp_ka {
            if req.set_keepalive(ka).is_err() {
                error!("Can not set socket keep-alive option");
            }
        }

        ok(req)
    }
}

View File

@@ -1,23 +1,22 @@
use std::cell::{RefCell, RefMut, UnsafeCell}; use std::cell::{Cell, RefCell, RefMut};
use std::collections::VecDeque; use std::collections::VecDeque;
use std::fmt::Write; use std::fmt::Write;
use std::rc::Rc; use std::rc::Rc;
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use std::{env, fmt, net}; use std::{env, fmt, net};
use actix::Arbiter;
use bytes::BytesMut; use bytes::BytesMut;
use futures::Stream; use futures::{future, Future};
use futures_cpupool::CpuPool; use futures_cpupool::CpuPool;
use http::StatusCode; use http::StatusCode;
use lazycell::LazyCell; use lazycell::LazyCell;
use parking_lot::Mutex; use parking_lot::Mutex;
use time; use time;
use tokio_timer::Interval; use tokio_current_thread::spawn;
use tokio_timer::{sleep, Delay};
use super::channel::Node; use super::channel::Node;
use super::message::{Request, RequestPool}; use super::message::{Request, RequestPool};
use super::server::{ConnectionRateTag, ConnectionTag, Connections};
use super::KeepAlive; use super::KeepAlive;
use body::Body; use body::Body;
use httpresponse::{HttpResponse, HttpResponseBuilder, HttpResponsePool}; use httpresponse::{HttpResponse, HttpResponseBuilder, HttpResponsePool};
@@ -44,7 +43,7 @@ lazy_static! {
/// Various server settings /// Various server settings
pub struct ServerSettings { pub struct ServerSettings {
addr: Option<net::SocketAddr>, addr: net::SocketAddr,
secure: bool, secure: bool,
host: String, host: String,
cpu_pool: LazyCell<CpuPool>, cpu_pool: LazyCell<CpuPool>,
@@ -66,7 +65,7 @@ impl Clone for ServerSettings {
impl Default for ServerSettings { impl Default for ServerSettings {
fn default() -> Self { fn default() -> Self {
ServerSettings { ServerSettings {
addr: None, addr: "127.0.0.1:8080".parse().unwrap(),
secure: false, secure: false,
host: "localhost:8080".to_owned(), host: "localhost:8080".to_owned(),
responses: HttpResponsePool::get_pool(), responses: HttpResponsePool::get_pool(),
@@ -78,15 +77,9 @@ impl Default for ServerSettings {
impl ServerSettings { impl ServerSettings {
/// Crate server settings instance /// Crate server settings instance
pub(crate) fn new( pub(crate) fn new(
addr: Option<net::SocketAddr>, host: &Option<String>, secure: bool, addr: net::SocketAddr, host: &str, secure: bool,
) -> ServerSettings { ) -> ServerSettings {
let host = if let Some(ref host) = *host { let host = host.to_owned();
host.clone()
} else if let Some(ref addr) = addr {
format!("{}", addr)
} else {
"localhost".to_owned()
};
let cpu_pool = LazyCell::new(); let cpu_pool = LazyCell::new();
let responses = HttpResponsePool::get_pool(); let responses = HttpResponsePool::get_pool();
ServerSettings { ServerSettings {
@@ -99,7 +92,7 @@ impl ServerSettings {
} }
/// Returns the socket address of the local half of this TCP connection /// Returns the socket address of the local half of this TCP connection
pub fn local_addr(&self) -> Option<net::SocketAddr> { pub fn local_addr(&self) -> net::SocketAddr {
self.addr self.addr
} }
@@ -134,119 +127,300 @@ impl ServerSettings {
// "Sun, 06 Nov 1994 08:49:37 GMT".len() // "Sun, 06 Nov 1994 08:49:37 GMT".len()
const DATE_VALUE_LENGTH: usize = 29; const DATE_VALUE_LENGTH: usize = 29;
pub(crate) struct WorkerSettings<H> { /// Http service configuration
h: Vec<H>, pub struct ServiceConfig<H>(Rc<Inner<H>>);
keep_alive: u64,
struct Inner<H> {
handler: H,
keep_alive: Option<Duration>,
client_timeout: u64,
client_shutdown: u64,
ka_enabled: bool, ka_enabled: bool,
bytes: Rc<SharedBytesPool>, bytes: Rc<SharedBytesPool>,
messages: &'static RequestPool, messages: &'static RequestPool,
conns: Connections,
node: RefCell<Node<()>>, node: RefCell<Node<()>>,
date: UnsafeCell<Date>, date: Cell<Option<Date>>,
} }
impl<H: 'static> WorkerSettings<H> { impl<H> Clone for ServiceConfig<H> {
pub(crate) fn create( fn clone(&self) -> Self {
apps: Vec<H>, keep_alive: KeepAlive, settings: ServerSettings, ServiceConfig(self.0.clone())
conns: Connections,
) -> Rc<WorkerSettings<H>> {
let settings = Rc::new(Self::new(apps, keep_alive, settings, conns));
// periodic date update
let s = settings.clone();
Arbiter::spawn(
Interval::new(Instant::now(), Duration::from_secs(1))
.map_err(|_| ())
.and_then(move |_| {
s.update_date();
Ok(())
}).fold((), |(), _| Ok(())),
);
settings
} }
} }
impl<H> WorkerSettings<H> { impl<H> ServiceConfig<H> {
/// Create instance of `ServiceConfig`
pub(crate) fn new( pub(crate) fn new(
h: Vec<H>, keep_alive: KeepAlive, settings: ServerSettings, conns: Connections, handler: H, keep_alive: KeepAlive, client_timeout: u64, client_shutdown: u64,
) -> WorkerSettings<H> { settings: ServerSettings,
) -> ServiceConfig<H> {
let (keep_alive, ka_enabled) = match keep_alive { let (keep_alive, ka_enabled) = match keep_alive {
KeepAlive::Timeout(val) => (val as u64, true), KeepAlive::Timeout(val) => (val as u64, true),
KeepAlive::Os | KeepAlive::Tcp(_) => (0, true), KeepAlive::Os | KeepAlive::Tcp(_) => (0, true),
KeepAlive::Disabled => (0, false), KeepAlive::Disabled => (0, false),
}; };
let keep_alive = if ka_enabled && keep_alive > 0 {
Some(Duration::from_secs(keep_alive))
} else {
None
};
WorkerSettings { ServiceConfig(Rc::new(Inner {
h, handler,
keep_alive,
ka_enabled,
client_timeout,
client_shutdown,
bytes: Rc::new(SharedBytesPool::new()), bytes: Rc::new(SharedBytesPool::new()),
messages: RequestPool::pool(settings), messages: RequestPool::pool(settings),
node: RefCell::new(Node::head()), node: RefCell::new(Node::head()),
date: UnsafeCell::new(Date::new()), date: Cell::new(None),
keep_alive, }))
ka_enabled,
conns,
}
} }
pub fn head(&self) -> RefMut<Node<()>> { /// Create worker settings builder.
self.node.borrow_mut() pub fn build(handler: H) -> ServiceConfigBuilder<H> {
ServiceConfigBuilder::new(handler)
} }
pub fn handlers(&self) -> &Vec<H> { pub(crate) fn head(&self) -> RefMut<Node<()>> {
&self.h self.0.node.borrow_mut()
} }
pub fn keep_alive(&self) -> u64 { pub(crate) fn handler(&self) -> &H {
self.keep_alive &self.0.handler
} }
#[inline]
/// Keep alive duration if configured.
pub fn keep_alive(&self) -> Option<Duration> {
self.0.keep_alive
}
#[inline]
/// Return state of connection keep-alive funcitonality
pub fn keep_alive_enabled(&self) -> bool { pub fn keep_alive_enabled(&self) -> bool {
self.ka_enabled self.0.ka_enabled
} }
pub fn get_bytes(&self) -> BytesMut { pub(crate) fn get_bytes(&self) -> BytesMut {
self.bytes.get_bytes() self.0.bytes.get_bytes()
} }
pub fn release_bytes(&self, bytes: BytesMut) { pub(crate) fn release_bytes(&self, bytes: BytesMut) {
self.bytes.release_bytes(bytes) self.0.bytes.release_bytes(bytes)
} }
pub fn get_request(&self) -> Request { pub(crate) fn get_request(&self) -> Request {
RequestPool::get(self.messages) RequestPool::get(self.0.messages)
}
pub fn connection(&self) -> ConnectionTag {
self.conns.connection()
}
fn update_date(&self) {
// Unsafe: WorkerSetting is !Sync and !Send
unsafe { &mut *self.date.get() }.update();
}
pub fn set_date(&self, dst: &mut BytesMut, full: bool) {
// Unsafe: WorkerSetting is !Sync and !Send
let date_bytes = unsafe { &(*self.date.get()).bytes };
if full {
let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
buf[6..35].copy_from_slice(date_bytes);
buf[35..].copy_from_slice(b"\r\n\r\n");
dst.extend_from_slice(&buf);
} else {
dst.extend_from_slice(date_bytes);
}
}
#[allow(dead_code)]
pub(crate) fn connection_rate(&self) -> ConnectionRateTag {
self.conns.connection_rate()
} }
} }
impl<H: 'static> ServiceConfig<H> {
#[inline]
/// Client timeout for first request.
pub fn client_timer(&self) -> Option<Delay> {
let delay = self.0.client_timeout;
if delay != 0 {
Some(Delay::new(self.now() + Duration::from_millis(delay)))
} else {
None
}
}
/// Client timeout for first request.
pub fn client_timer_expire(&self) -> Option<Instant> {
let delay = self.0.client_timeout;
if delay != 0 {
Some(self.now() + Duration::from_millis(delay))
} else {
None
}
}
/// Client shutdown timer
pub fn client_shutdown_timer(&self) -> Option<Instant> {
let delay = self.0.client_shutdown;
if delay != 0 {
Some(self.now() + Duration::from_millis(delay))
} else {
None
}
}
#[inline]
/// Return keep-alive timer delay is configured.
pub fn keep_alive_timer(&self) -> Option<Delay> {
if let Some(ka) = self.0.keep_alive {
Some(Delay::new(self.now() + ka))
} else {
None
}
}
/// Keep-alive expire time
pub fn keep_alive_expire(&self) -> Option<Instant> {
if let Some(ka) = self.0.keep_alive {
Some(self.now() + ka)
} else {
None
}
}
fn check_date(&self) {
if unsafe { &*self.0.date.as_ptr() }.is_none() {
self.0.date.set(Some(Date::new()));
// periodic date update
let s = self.clone();
spawn(sleep(Duration::from_millis(500)).then(move |_| {
s.0.date.set(None);
future::ok(())
}));
}
}
pub(crate) fn set_date(&self, dst: &mut BytesMut, full: bool) {
self.check_date();
let date = &unsafe { &*self.0.date.as_ptr() }.as_ref().unwrap().bytes;
if full {
let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
buf[6..35].copy_from_slice(date);
buf[35..].copy_from_slice(b"\r\n\r\n");
dst.extend_from_slice(&buf);
} else {
dst.extend_from_slice(date);
}
}
#[inline]
pub(crate) fn now(&self) -> Instant {
self.check_date();
unsafe { &*self.0.date.as_ptr() }.as_ref().unwrap().current
}
}
/// A service config builder
///
/// This type can be used to construct an instance of `ServiceConfig` through a
/// builder-like pattern.
pub struct ServiceConfigBuilder<H> {
handler: H,
keep_alive: KeepAlive,
client_timeout: u64,
client_shutdown: u64,
host: String,
addr: net::SocketAddr,
secure: bool,
}
impl<H> ServiceConfigBuilder<H> {
/// Create instance of `ServiceConfigBuilder`
pub fn new(handler: H) -> ServiceConfigBuilder<H> {
ServiceConfigBuilder {
handler,
keep_alive: KeepAlive::Timeout(5),
client_timeout: 5000,
client_shutdown: 5000,
secure: false,
host: "localhost".to_owned(),
addr: "127.0.0.1:8080".parse().unwrap(),
}
}
/// Enable secure flag for current server.
///
/// By default this flag is set to false.
pub fn secure(mut self) -> Self {
self.secure = true;
self
}
/// Set server keep-alive setting.
///
/// By default keep alive is set to a 5 seconds.
pub fn keep_alive<T: Into<KeepAlive>>(mut self, val: T) -> Self {
self.keep_alive = val.into();
self
}
/// Set server client timeout in milliseconds for first request.
///
/// Defines a timeout for reading client request header. If a client does not transmit
/// the entire set headers within this time, the request is terminated with
/// the 408 (Request Time-out) error.
///
/// To disable timeout set value to 0.
///
/// By default client timeout is set to 5000 milliseconds.
pub fn client_timeout(mut self, val: u64) -> Self {
self.client_timeout = val;
self
}
/// Set server connection shutdown timeout in milliseconds.
///
/// Defines a timeout for shutdown connection. If a shutdown procedure does not complete
/// within this time, the request is dropped. This timeout affects only secure connections.
///
/// To disable timeout set value to 0.
///
/// By default client timeout is set to 5000 milliseconds.
pub fn client_shutdown(mut self, val: u64) -> Self {
self.client_shutdown = val;
self
}
/// Set server host name.
///
/// Host name is used by application router aa a hostname for url
/// generation. Check [ConnectionInfo](./dev/struct.ConnectionInfo.
/// html#method.host) documentation for more information.
///
/// By default host name is set to a "localhost" value.
pub fn server_hostname(mut self, val: &str) -> Self {
self.host = val.to_owned();
self
}
/// Set server ip address.
///
/// Host name is used by application router aa a hostname for url
/// generation. Check [ConnectionInfo](./dev/struct.ConnectionInfo.
/// html#method.host) documentation for more information.
///
/// By default server address is set to a "127.0.0.1:8080"
pub fn server_address<S: net::ToSocketAddrs>(mut self, addr: S) -> Self {
match addr.to_socket_addrs() {
Err(err) => error!("Can not convert to SocketAddr: {}", err),
Ok(mut addrs) => if let Some(addr) = addrs.next() {
self.addr = addr;
},
}
self
}
/// Finish service configuration and create `ServiceConfig` object.
pub fn finish(self) -> ServiceConfig<H> {
let settings = ServerSettings::new(self.addr, &self.host, self.secure);
let client_shutdown = if self.secure { self.client_shutdown } else { 0 };
ServiceConfig::new(
self.handler,
self.keep_alive,
self.client_timeout,
client_shutdown,
settings,
)
}
}
#[derive(Copy, Clone)]
struct Date { struct Date {
current: Instant,
bytes: [u8; DATE_VALUE_LENGTH], bytes: [u8; DATE_VALUE_LENGTH],
pos: usize, pos: usize,
} }
@@ -254,6 +428,7 @@ struct Date {
impl Date { impl Date {
fn new() -> Date { fn new() -> Date {
let mut date = Date { let mut date = Date {
current: Instant::now(),
bytes: [0; DATE_VALUE_LENGTH], bytes: [0; DATE_VALUE_LENGTH],
pos: 0, pos: 0,
}; };
@@ -262,6 +437,7 @@ impl Date {
} }
fn update(&mut self) { fn update(&mut self) {
self.pos = 0; self.pos = 0;
self.current = Instant::now();
write!(self, "{}", time::at_utc(time::get_time()).rfc822()).unwrap(); write!(self, "{}", time::at_utc(time::get_time()).rfc822()).unwrap();
} }
} }
@@ -303,6 +479,8 @@ impl SharedBytesPool {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use futures::future;
use tokio::runtime::current_thread;
#[test] #[test]
fn test_date_len() { fn test_date_len() {
@@ -311,16 +489,22 @@ mod tests {
#[test] #[test]
fn test_date() { fn test_date() {
let settings = WorkerSettings::<()>::new( let mut rt = current_thread::Runtime::new().unwrap();
Vec::new(),
KeepAlive::Os, let _ = rt.block_on(future::lazy(|| {
ServerSettings::default(), let settings = ServiceConfig::<()>::new(
Connections::default(), (),
); KeepAlive::Os,
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); 0,
settings.set_date(&mut buf1, true); 0,
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10); ServerSettings::default(),
settings.set_date(&mut buf2, true); );
assert_eq!(buf1, buf2); let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf1, true);
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf2, true);
assert_eq!(buf1, buf2);
future::ok::<_, ()>(())
}));
} }
} }

View File

@@ -1,12 +1,10 @@
#[cfg(feature = "alpn")] #[cfg(any(feature = "alpn", feature = "ssl"))]
mod openssl; mod openssl;
#[cfg(feature = "alpn")] #[cfg(any(feature = "alpn", feature = "ssl"))]
pub use self::openssl::OpensslAcceptor; pub use self::openssl::{openssl_acceptor_with_flags, OpensslAcceptor};
#[cfg(feature = "tls")] #[cfg(feature = "tls")]
mod nativetls; mod nativetls;
#[cfg(feature = "tls")]
pub use self::nativetls::{NativeTlsAcceptor, TlsStream};
#[cfg(feature = "rust-tls")] #[cfg(feature = "rust-tls")]
mod rustls; mod rustls;

View File

@@ -1,61 +1,9 @@
use std::net::Shutdown; use std::net::{Shutdown, SocketAddr};
use std::{io, time}; use std::{io, time};
use futures::{Async, Future, Poll}; use actix_net::ssl::TlsStream;
use native_tls::{self, HandshakeError, TlsAcceptor};
use tokio_io::{AsyncRead, AsyncWrite};
use server::{AcceptorService, IoStream}; use server::IoStream;
#[derive(Clone)]
/// Support `SSL` connections via native-tls package
///
/// `tls` feature enables `NativeTlsAcceptor` type
pub struct NativeTlsAcceptor {
acceptor: TlsAcceptor,
}
/// A wrapper around an underlying raw stream which implements the TLS or SSL
/// protocol.
///
/// A `TlsStream<S>` represents a handshake that has been completed successfully
/// and both the server and the client are ready for receiving and sending
/// data. Bytes read from a `TlsStream` are decrypted from `S` and bytes written
/// to a `TlsStream` are encrypted when passing through to `S`.
#[derive(Debug)]
pub struct TlsStream<S> {
inner: native_tls::TlsStream<S>,
}
/// Future returned from `NativeTlsAcceptor::accept` which will resolve
/// once the accept handshake has finished.
pub struct Accept<S> {
inner: Option<Result<native_tls::TlsStream<S>, HandshakeError<S>>>,
}
impl NativeTlsAcceptor {
/// Create `NativeTlsAcceptor` instance
pub fn new(acceptor: TlsAcceptor) -> Self {
NativeTlsAcceptor {
acceptor: acceptor.into(),
}
}
}
impl<Io: IoStream> AcceptorService<Io> for NativeTlsAcceptor {
type Accepted = TlsStream<Io>;
type Future = Accept<Io>;
fn scheme(&self) -> &'static str {
"https"
}
fn accept(&self, io: Io) -> Self::Future {
Accept {
inner: Some(self.acceptor.accept(io)),
}
}
}
impl<Io: IoStream> IoStream for TlsStream<Io> { impl<Io: IoStream> IoStream for TlsStream<Io> {
#[inline] #[inline]
@@ -64,6 +12,11 @@ impl<Io: IoStream> IoStream for TlsStream<Io> {
Ok(()) Ok(())
} }
#[inline]
fn peer_addr(&self) -> Option<SocketAddr> {
self.get_ref().get_ref().peer_addr()
}
#[inline] #[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().get_mut().set_nodelay(nodelay) self.get_mut().get_mut().set_nodelay(nodelay)
@@ -73,71 +26,9 @@ impl<Io: IoStream> IoStream for TlsStream<Io> {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> { fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur) self.get_mut().get_mut().set_linger(dur)
} }
}
impl<Io: IoStream> Future for Accept<Io> { #[inline]
type Item = TlsStream<Io>; fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
type Error = io::Error; self.get_mut().get_mut().set_keepalive(dur)
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.inner.take().expect("cannot poll MidHandshake twice") {
Ok(stream) => Ok(TlsStream { inner: stream }.into()),
Err(HandshakeError::Failure(e)) => {
Err(io::Error::new(io::ErrorKind::Other, e))
}
Err(HandshakeError::WouldBlock(s)) => match s.handshake() {
Ok(stream) => Ok(TlsStream { inner: stream }.into()),
Err(HandshakeError::Failure(e)) => {
Err(io::Error::new(io::ErrorKind::Other, e))
}
Err(HandshakeError::WouldBlock(s)) => {
self.inner = Some(Err(HandshakeError::WouldBlock(s)));
Ok(Async::NotReady)
}
},
}
}
}
impl<S> TlsStream<S> {
/// Get access to the internal `native_tls::TlsStream` stream which also
/// transitively allows access to `S`.
pub fn get_ref(&self) -> &native_tls::TlsStream<S> {
&self.inner
}
/// Get mutable access to the internal `native_tls::TlsStream` stream which
/// also transitively allows mutable access to `S`.
pub fn get_mut(&mut self) -> &mut native_tls::TlsStream<S> {
&mut self.inner
}
}
impl<S: io::Read + io::Write> io::Read for TlsStream<S> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
}
impl<S: io::Read + io::Write> io::Write for TlsStream<S> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.inner.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl<S: AsyncRead + AsyncWrite> AsyncRead for TlsStream<S> {}
impl<S: AsyncRead + AsyncWrite> AsyncWrite for TlsStream<S> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
match self.inner.shutdown() {
Ok(_) => (),
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => (),
Err(e) => return Err(e),
}
self.inner.get_mut().shutdown()
} }
} }

View File

@@ -1,80 +1,61 @@
use std::net::Shutdown; use std::net::{Shutdown, SocketAddr};
use std::{io, time}; use std::{io, time};
use futures::{Future, Poll}; use actix_net::ssl;
use openssl::ssl::{AlpnError, SslAcceptor, SslAcceptorBuilder}; use openssl::ssl::{AlpnError, SslAcceptor, SslAcceptorBuilder};
use tokio_openssl::{AcceptAsync, SslAcceptorExt, SslStream}; use tokio_io::{AsyncRead, AsyncWrite};
use tokio_openssl::SslStream;
use server::{AcceptorService, IoStream, ServerFlags}; use server::{IoStream, ServerFlags};
#[derive(Clone)]
/// Support `SSL` connections via openssl package /// Support `SSL` connections via openssl package
/// ///
/// `alpn` feature enables `OpensslAcceptor` type /// `ssl` feature enables `OpensslAcceptor` type
pub struct OpensslAcceptor { pub struct OpensslAcceptor<T> {
acceptor: SslAcceptor, _t: ssl::OpensslAcceptor<T>,
} }
impl OpensslAcceptor { impl<T: AsyncRead + AsyncWrite> OpensslAcceptor<T> {
/// Create `OpensslAcceptor` with enabled `HTTP/2` and `HTTP1.1` support. /// Create `OpensslAcceptor` with enabled `HTTP/2` and `HTTP1.1` support.
pub fn new(builder: SslAcceptorBuilder) -> io::Result<Self> { pub fn new(builder: SslAcceptorBuilder) -> io::Result<ssl::OpensslAcceptor<T>> {
OpensslAcceptor::with_flags(builder, ServerFlags::HTTP1 | ServerFlags::HTTP2) OpensslAcceptor::with_flags(builder, ServerFlags::HTTP1 | ServerFlags::HTTP2)
} }
/// Create `OpensslAcceptor` with custom server flags. /// Create `OpensslAcceptor` with custom server flags.
pub fn with_flags( pub fn with_flags(
mut builder: SslAcceptorBuilder, flags: ServerFlags, builder: SslAcceptorBuilder, flags: ServerFlags,
) -> io::Result<Self> { ) -> io::Result<ssl::OpensslAcceptor<T>> {
let mut protos = Vec::new(); let acceptor = openssl_acceptor_with_flags(builder, flags)?;
if flags.contains(ServerFlags::HTTP1) {
protos.extend(b"\x08http/1.1");
}
if flags.contains(ServerFlags::HTTP2) {
protos.extend(b"\x02h2");
builder.set_alpn_select_callback(|_, protos| {
const H2: &[u8] = b"\x02h2";
if protos.windows(3).any(|window| window == H2) {
Ok(b"h2")
} else {
Err(AlpnError::NOACK)
}
});
}
if !protos.is_empty() { Ok(ssl::OpensslAcceptor::new(acceptor))
builder.set_alpn_protos(&protos)?;
}
Ok(OpensslAcceptor {
acceptor: builder.build(),
})
} }
} }
pub struct AcceptorFut<Io>(AcceptAsync<Io>); /// Configure `SslAcceptorBuilder` with custom server flags.
pub fn openssl_acceptor_with_flags(
impl<Io: IoStream> Future for AcceptorFut<Io> { mut builder: SslAcceptorBuilder, flags: ServerFlags,
type Item = SslStream<Io>; ) -> io::Result<SslAcceptor> {
type Error = io::Error; let mut protos = Vec::new();
if flags.contains(ServerFlags::HTTP1) {
fn poll(&mut self) -> Poll<Self::Item, Self::Error> { protos.extend(b"\x08http/1.1");
self.0
.poll()
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
} }
} if flags.contains(ServerFlags::HTTP2) {
protos.extend(b"\x02h2");
impl<Io: IoStream> AcceptorService<Io> for OpensslAcceptor { builder.set_alpn_select_callback(|_, protos| {
type Accepted = SslStream<Io>; const H2: &[u8] = b"\x02h2";
type Future = AcceptorFut<Io>; if protos.windows(3).any(|window| window == H2) {
Ok(b"h2")
fn scheme(&self) -> &'static str { } else {
"https" Err(AlpnError::NOACK)
}
});
} }
fn accept(&self, io: Io) -> Self::Future { if !protos.is_empty() {
AcceptorFut(SslAcceptorExt::accept_async(&self.acceptor, io)) builder.set_alpn_protos(&protos)?;
} }
Ok(builder.build())
} }
impl<T: IoStream> IoStream for SslStream<T> { impl<T: IoStream> IoStream for SslStream<T> {
@@ -84,6 +65,11 @@ impl<T: IoStream> IoStream for SslStream<T> {
Ok(()) Ok(())
} }
#[inline]
fn peer_addr(&self) -> Option<SocketAddr> {
self.get_ref().get_ref().peer_addr()
}
#[inline] #[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().get_mut().set_nodelay(nodelay) self.get_mut().get_mut().set_nodelay(nodelay)
@@ -93,4 +79,9 @@ impl<T: IoStream> IoStream for SslStream<T> {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> { fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur) self.get_mut().get_mut().set_linger(dur)
} }
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_keepalive(dur)
}
} }

View File

@@ -1,29 +1,25 @@
use std::net::Shutdown; use std::net::{Shutdown, SocketAddr};
use std::sync::Arc;
use std::{io, time}; use std::{io, time};
use actix_net::ssl; //::RustlsAcceptor;
use rustls::{ClientSession, ServerConfig, ServerSession}; use rustls::{ClientSession, ServerConfig, ServerSession};
use tokio_io::AsyncWrite; use tokio_io::{AsyncRead, AsyncWrite};
use tokio_rustls::{AcceptAsync, ServerConfigExt, TlsStream}; use tokio_rustls::TlsStream;
use server::{AcceptorService, IoStream, ServerFlags}; use server::{IoStream, ServerFlags};
#[derive(Clone)]
/// Support `SSL` connections via rustls package /// Support `SSL` connections via rustls package
/// ///
/// `rust-tls` feature enables `RustlsAcceptor` type /// `rust-tls` feature enables `RustlsAcceptor` type
pub struct RustlsAcceptor { pub struct RustlsAcceptor<T> {
config: Arc<ServerConfig>, _t: ssl::RustlsAcceptor<T>,
} }
impl RustlsAcceptor { impl<T: AsyncRead + AsyncWrite> RustlsAcceptor<T> {
/// Create `OpensslAcceptor` with enabled `HTTP/2` and `HTTP1.1` support. /// Create `RustlsAcceptor` with custom server flags.
pub fn new(config: ServerConfig) -> Self { pub fn with_flags(
RustlsAcceptor::with_flags(config, ServerFlags::HTTP1 | ServerFlags::HTTP2) mut config: ServerConfig, flags: ServerFlags,
} ) -> ssl::RustlsAcceptor<T> {
/// Create `OpensslAcceptor` with custom server flags.
pub fn with_flags(mut config: ServerConfig, flags: ServerFlags) -> Self {
let mut protos = Vec::new(); let mut protos = Vec::new();
if flags.contains(ServerFlags::HTTP2) { if flags.contains(ServerFlags::HTTP2) {
protos.push("h2".to_string()); protos.push("h2".to_string());
@@ -35,22 +31,7 @@ impl RustlsAcceptor {
config.set_protocols(&protos); config.set_protocols(&protos);
} }
RustlsAcceptor { ssl::RustlsAcceptor::new(config)
config: Arc::new(config),
}
}
}
impl<Io: IoStream> AcceptorService<Io> for RustlsAcceptor {
type Accepted = TlsStream<Io, ServerSession>;
type Future = AcceptAsync<Io>;
fn scheme(&self) -> &'static str {
"https"
}
fn accept(&self, io: Io) -> Self::Future {
ServerConfigExt::accept_async(&self.config, io)
} }
} }
@@ -70,6 +51,11 @@ impl<Io: IoStream> IoStream for TlsStream<Io, ClientSession> {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> { fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_linger(dur) self.get_mut().0.set_linger(dur)
} }
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_keepalive(dur)
}
} }
impl<Io: IoStream> IoStream for TlsStream<Io, ServerSession> { impl<Io: IoStream> IoStream for TlsStream<Io, ServerSession> {
@@ -79,6 +65,11 @@ impl<Io: IoStream> IoStream for TlsStream<Io, ServerSession> {
Ok(()) Ok(())
} }
#[inline]
fn peer_addr(&self) -> Option<SocketAddr> {
self.get_ref().0.peer_addr()
}
#[inline] #[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> { fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().0.set_nodelay(nodelay) self.get_mut().0.set_nodelay(nodelay)
@@ -88,4 +79,9 @@ impl<Io: IoStream> IoStream for TlsStream<Io, ServerSession> {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> { fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_linger(dur) self.get_mut().0.set_linger(dur)
} }
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_keepalive(dur)
}
} }

View File

@@ -1,139 +0,0 @@
use std::{net, time};
use futures::sync::mpsc::{SendError, UnboundedSender};
use futures::sync::oneshot;
use futures::Future;
use actix::msgs::StopArbiter;
use actix::{Actor, Arbiter, AsyncContext, Context, Handler, Message, Response};
use super::server::{Connections, ServiceHandler};
use super::Token;
#[derive(Message)]
pub(crate) struct Conn<T> {
pub io: T,
pub handler: Token,
pub token: Token,
pub peer: Option<net::SocketAddr>,
}
pub(crate) struct Socket {
pub lst: net::TcpListener,
pub addr: net::SocketAddr,
pub token: Token,
}
#[derive(Clone)]
pub(crate) struct WorkerClient {
pub idx: usize,
tx: UnboundedSender<Conn<net::TcpStream>>,
conns: Connections,
}
impl WorkerClient {
pub fn new(
idx: usize, tx: UnboundedSender<Conn<net::TcpStream>>, conns: Connections,
) -> Self {
WorkerClient { idx, tx, conns }
}
pub fn send(
&self, msg: Conn<net::TcpStream>,
) -> Result<(), SendError<Conn<net::TcpStream>>> {
self.tx.unbounded_send(msg)
}
pub fn available(&self) -> bool {
self.conns.available()
}
}
/// Stop worker message. Returns `true` on successful shutdown
/// and `false` if some connections still alive.
pub(crate) struct StopWorker {
pub graceful: Option<time::Duration>,
}
impl Message for StopWorker {
type Result = Result<bool, ()>;
}
/// Http worker
///
/// Worker accepts Socket objects via unbounded channel and start requests
/// processing.
pub(crate) struct Worker {
conns: Connections,
handlers: Vec<Box<ServiceHandler>>,
}
impl Actor for Worker {
type Context = Context<Self>;
}
impl Worker {
pub(crate) fn new(conns: Connections, handlers: Vec<Box<ServiceHandler>>) -> Self {
Worker { conns, handlers }
}
fn shutdown(&self, force: bool) {
self.handlers.iter().for_each(|h| h.shutdown(force));
}
fn shutdown_timeout(
&self, ctx: &mut Context<Worker>, tx: oneshot::Sender<bool>, dur: time::Duration,
) {
// sleep for 1 second and then check again
ctx.run_later(time::Duration::new(1, 0), move |slf, ctx| {
let num = slf.conns.num_connections();
if num == 0 {
let _ = tx.send(true);
Arbiter::current().do_send(StopArbiter(0));
} else if let Some(d) = dur.checked_sub(time::Duration::new(1, 0)) {
slf.shutdown_timeout(ctx, tx, d);
} else {
info!("Force shutdown http worker, {} connections", num);
slf.shutdown(true);
let _ = tx.send(false);
Arbiter::current().do_send(StopArbiter(0));
}
});
}
}
impl Handler<Conn<net::TcpStream>> for Worker {
type Result = ();
fn handle(&mut self, msg: Conn<net::TcpStream>, _: &mut Context<Self>) {
self.handlers[msg.handler.0].handle(msg.token, msg.io, msg.peer)
}
}
/// `StopWorker` message handler
impl Handler<StopWorker> for Worker {
type Result = Response<bool, ()>;
fn handle(&mut self, msg: StopWorker, ctx: &mut Context<Self>) -> Self::Result {
let num = self.conns.num_connections();
if num == 0 {
info!("Shutting down http worker, 0 connections");
Response::reply(Ok(true))
} else if let Some(dur) = msg.graceful {
self.shutdown(false);
let (tx, rx) = oneshot::channel();
let num = self.conns.num_connections();
if num != 0 {
info!("Graceful http worker shutdown, {} connections", num);
self.shutdown_timeout(ctx, tx, dur);
Response::reply(Ok(true))
} else {
Response::async(rx.map_err(|_| ()))
}
} else {
info!("Force shutdown http worker, {} connections", num);
self.shutdown(true);
Response::reply(Ok(false))
}
}
}

View File

@@ -13,14 +13,10 @@ use http::{HeaderMap, HttpTryFrom, Method, Uri, Version};
use net2::TcpBuilder; use net2::TcpBuilder;
use tokio::runtime::current_thread::Runtime; use tokio::runtime::current_thread::Runtime;
#[cfg(feature = "alpn")] #[cfg(any(feature = "alpn", feature = "ssl"))]
use openssl::ssl::SslAcceptorBuilder; use openssl::ssl::SslAcceptorBuilder;
#[cfg(feature = "rust-tls")] #[cfg(feature = "rust-tls")]
use rustls::ServerConfig; use rustls::ServerConfig;
#[cfg(feature = "alpn")]
use server::OpensslAcceptor;
#[cfg(feature = "rust-tls")]
use server::RustlsAcceptor;
use application::{App, HttpApplication}; use application::{App, HttpApplication};
use body::Binary; use body::Binary;
@@ -79,13 +75,13 @@ impl TestServer {
/// middlewares or set handlers for test application. /// middlewares or set handlers for test application.
pub fn new<F>(config: F) -> Self pub fn new<F>(config: F) -> Self
where where
F: Sync + Send + 'static + Fn(&mut TestApp<()>), F: Clone + Send + 'static + Fn(&mut TestApp<()>),
{ {
TestServerBuilder::new(|| ()).start(config) TestServerBuilder::new(|| ()).start(config)
} }
/// Create test server builder /// Create test server builder
pub fn build() -> TestServerBuilder<()> { pub fn build() -> TestServerBuilder<(), impl Fn() -> () + Clone + Send + 'static> {
TestServerBuilder::new(|| ()) TestServerBuilder::new(|| ())
} }
@@ -94,19 +90,18 @@ impl TestServer {
/// This method can be used for constructing application state. /// This method can be used for constructing application state.
/// Also it can be used for external dependency initialization, /// Also it can be used for external dependency initialization,
/// like creating sync actors for diesel integration. /// like creating sync actors for diesel integration.
pub fn build_with_state<F, S>(state: F) -> TestServerBuilder<S> pub fn build_with_state<S, F>(state: F) -> TestServerBuilder<S, F>
where where
F: Fn() -> S + Sync + Send + 'static, F: Fn() -> S + Clone + Send + 'static,
S: 'static, S: 'static,
{ {
TestServerBuilder::new(state) TestServerBuilder::new(state)
} }
/// Start new test server with application factory /// Start new test server with application factory
pub fn with_factory<F, U, H>(factory: F) -> Self pub fn with_factory<F, H>(factory: F) -> Self
where where
F: Fn() -> U + Sync + Send + 'static, F: Fn() -> H + Send + Clone + 'static,
U: IntoIterator<Item = H> + 'static,
H: IntoHttpHandler + 'static, H: IntoHttpHandler + 'static,
{ {
let (tx, rx) = mpsc::channel(); let (tx, rx) = mpsc::channel();
@@ -117,9 +112,10 @@ impl TestServer {
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap(); let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
let local_addr = tcp.local_addr().unwrap(); let local_addr = tcp.local_addr().unwrap();
HttpServer::new(factory) let _ = HttpServer::new(factory)
.disable_signals() .disable_signals()
.listen(tcp) .listen(tcp)
.keep_alive(5)
.start(); .start();
tx.send((System::current(), local_addr, TestServer::get_conn())) tx.send((System::current(), local_addr, TestServer::get_conn()))
@@ -138,7 +134,7 @@ impl TestServer {
} }
fn get_conn() -> Addr<ClientConnector> { fn get_conn() -> Addr<ClientConnector> {
#[cfg(feature = "alpn")] #[cfg(any(feature = "alpn", feature = "ssl"))]
{ {
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode}; use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
@@ -146,7 +142,10 @@ impl TestServer {
builder.set_verify(SslVerifyMode::NONE); builder.set_verify(SslVerifyMode::NONE);
ClientConnector::with_connector(builder.build()).start() ClientConnector::with_connector(builder.build()).start()
} }
#[cfg(all(feature = "rust-tls", not(feature = "alpn")))] #[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "ssl"))
))]
{ {
use rustls::ClientConfig; use rustls::ClientConfig;
use std::fs::File; use std::fs::File;
@@ -156,7 +155,7 @@ impl TestServer {
config.root_store.add_pem_file(pem_file).unwrap(); config.root_store.add_pem_file(pem_file).unwrap();
ClientConnector::with_connector(config).start() ClientConnector::with_connector(config).start()
} }
#[cfg(not(any(feature = "alpn", feature = "rust-tls")))] #[cfg(not(any(feature = "alpn", feature = "ssl", feature = "rust-tls")))]
{ {
ClientConnector::default().start() ClientConnector::default().start()
} }
@@ -260,30 +259,33 @@ impl Drop for TestServer {
/// ///
/// This type can be used to construct an instance of `TestServer` through a /// This type can be used to construct an instance of `TestServer` through a
/// builder-like pattern. /// builder-like pattern.
pub struct TestServerBuilder<S> { pub struct TestServerBuilder<S, F>
state: Box<Fn() -> S + Sync + Send + 'static>, where
#[cfg(feature = "alpn")] F: Fn() -> S + Send + Clone + 'static,
{
state: F,
#[cfg(any(feature = "alpn", feature = "ssl"))]
ssl: Option<SslAcceptorBuilder>, ssl: Option<SslAcceptorBuilder>,
#[cfg(feature = "rust-tls")] #[cfg(feature = "rust-tls")]
rust_ssl: Option<ServerConfig>, rust_ssl: Option<ServerConfig>,
} }
impl<S: 'static> TestServerBuilder<S> { impl<S: 'static, F> TestServerBuilder<S, F>
where
F: Fn() -> S + Send + Clone + 'static,
{
/// Create a new test server /// Create a new test server
pub fn new<F>(state: F) -> TestServerBuilder<S> pub fn new(state: F) -> TestServerBuilder<S, F> {
where
F: Fn() -> S + Sync + Send + 'static,
{
TestServerBuilder { TestServerBuilder {
state: Box::new(state), state,
#[cfg(feature = "alpn")] #[cfg(any(feature = "alpn", feature = "ssl"))]
ssl: None, ssl: None,
#[cfg(feature = "rust-tls")] #[cfg(feature = "rust-tls")]
rust_ssl: None, rust_ssl: None,
} }
} }
#[cfg(feature = "alpn")] #[cfg(any(feature = "alpn", feature = "ssl"))]
/// Create ssl server /// Create ssl server
pub fn ssl(mut self, ssl: SslAcceptorBuilder) -> Self { pub fn ssl(mut self, ssl: SslAcceptorBuilder) -> Self {
self.ssl = Some(ssl); self.ssl = Some(ssl);
@@ -299,15 +301,15 @@ impl<S: 'static> TestServerBuilder<S> {
#[allow(unused_mut)] #[allow(unused_mut)]
/// Configure test application and run test server /// Configure test application and run test server
pub fn start<F>(mut self, config: F) -> TestServer pub fn start<C>(mut self, config: C) -> TestServer
where where
F: Sync + Send + 'static + Fn(&mut TestApp<S>), C: Fn(&mut TestApp<S>) + Clone + Send + 'static,
{ {
let (tx, rx) = mpsc::channel(); let (tx, rx) = mpsc::channel();
let mut has_ssl = false; let mut has_ssl = false;
#[cfg(feature = "alpn")] #[cfg(any(feature = "alpn", feature = "ssl"))]
{ {
has_ssl = has_ssl || self.ssl.is_some(); has_ssl = has_ssl || self.ssl.is_some();
} }
@@ -326,19 +328,20 @@ impl<S: 'static> TestServerBuilder<S> {
let mut srv = HttpServer::new(move || { let mut srv = HttpServer::new(move || {
let mut app = TestApp::new(state()); let mut app = TestApp::new(state());
config(&mut app); config(&mut app);
vec![app] app
}).workers(1) }).workers(1)
.keep_alive(5)
.disable_signals(); .disable_signals();
tx.send((System::current(), addr, TestServer::get_conn())) tx.send((System::current(), addr, TestServer::get_conn()))
.unwrap(); .unwrap();
#[cfg(feature = "alpn")] #[cfg(any(feature = "alpn", feature = "ssl"))]
{ {
let ssl = self.ssl.take(); let ssl = self.ssl.take();
if let Some(ssl) = ssl { if let Some(ssl) = ssl {
let tcp = net::TcpListener::bind(addr).unwrap(); let tcp = net::TcpListener::bind(addr).unwrap();
srv = srv.listen_with(tcp, OpensslAcceptor::new(ssl).unwrap()); srv = srv.listen_ssl(tcp, ssl).unwrap();
} }
} }
#[cfg(feature = "rust-tls")] #[cfg(feature = "rust-tls")]
@@ -346,7 +349,7 @@ impl<S: 'static> TestServerBuilder<S> {
let ssl = self.rust_ssl.take(); let ssl = self.rust_ssl.take();
if let Some(ssl) = ssl { if let Some(ssl) = ssl {
let tcp = net::TcpListener::bind(addr).unwrap(); let tcp = net::TcpListener::bind(addr).unwrap();
srv = srv.listen_with(tcp, RustlsAcceptor::new(ssl)); srv = srv.listen_rustls(tcp, ssl);
} }
} }
if !has_ssl { if !has_ssl {

View File

@@ -148,7 +148,7 @@ impl Quoter {
if let Some(data) = cloned { if let Some(data) = cloned {
// Unsafe: we get data from http::Uri, which does utf-8 checks already // Unsafe: we get data from http::Uri, which does utf-8 checks already
// this code only decodes valid pct encoded values // this code only decodes valid pct encoded values
Some(unsafe { Rc::new(String::from_utf8_unchecked(data)) }) Some(Rc::new(unsafe { String::from_utf8_unchecked(data) }))
} else { } else {
None None
} }

View File

@@ -12,7 +12,6 @@ trait FnWith<T, R>: 'static {
} }
impl<T, R, F: Fn(T) -> R + 'static> FnWith<T, R> for F { impl<T, R, F: Fn(T) -> R + 'static> FnWith<T, R> for F {
#[cfg_attr(feature = "cargo-clippy", allow(boxed_local))]
fn call_with(self: &Self, arg: T) -> R { fn call_with(self: &Self, arg: T) -> R {
(*self)(arg) (*self)(arg)
} }
@@ -42,24 +41,6 @@ where
fn create_with_config(self, T::Config) -> WithAsync<T, S, R, I, E>; fn create_with_config(self, T::Config) -> WithAsync<T, S, R, I, E>;
} }
// impl<T1, T2, T3, S, F, R> WithFactory<(T1, T2, T3), S, R> for F
// where F: Fn(T1, T2, T3) -> R + 'static,
// T1: FromRequest<S> + 'static,
// T2: FromRequest<S> + 'static,
// T3: FromRequest<S> + 'static,
// R: Responder + 'static,
// S: 'static,
// {
// fn create(self) -> With<(T1, T2, T3), S, R> {
// With::new(move |(t1, t2, t3)| (self)(t1, t2, t3), (
// T1::Config::default(), T2::Config::default(), T3::Config::default()))
// }
// fn create_with_config(self, cfg: (T1::Config, T2::Config, T3::Config,)) -> With<(T1, T2, T3), S, R> {
// With::new(move |(t1, t2, t3)| (self)(t1, t2, t3), cfg)
// }
// }
#[doc(hidden)] #[doc(hidden)]
pub struct With<T, S, R> pub struct With<T, S, R>
where where

View File

@@ -46,7 +46,7 @@ impl Frame {
Frame::message(payload, OpCode::Close, true, genmask) Frame::message(payload, OpCode::Close, true, genmask)
} }
#[cfg_attr(feature = "cargo-clippy", allow(type_complexity))] #[cfg_attr(feature = "cargo-clippy", allow(clippy::type_complexity))]
fn read_copy_md<S>( fn read_copy_md<S>(
pl: &mut PayloadBuffer<S>, server: bool, max_size: usize, pl: &mut PayloadBuffer<S>, server: bool, max_size: usize,
) -> Poll<Option<(usize, bool, OpCode, usize, Option<u32>)>, ProtocolError> ) -> Poll<Option<(usize, bool, OpCode, usize, Option<u32>)>, ProtocolError>

View File

@@ -1,5 +1,5 @@
//! This is code from [Tungstenite project](https://github.com/snapview/tungstenite-rs) //! This is code from [Tungstenite project](https://github.com/snapview/tungstenite-rs)
#![cfg_attr(feature = "cargo-clippy", allow(cast_ptr_alignment))] #![cfg_attr(feature = "cargo-clippy", allow(clippy::cast_ptr_alignment))]
use std::ptr::copy_nonoverlapping; use std::ptr::copy_nonoverlapping;
use std::slice; use std::slice;
@@ -19,7 +19,7 @@ impl<'a> ShortSlice<'a> {
/// Faster version of `apply_mask()` which operates on 8-byte blocks. /// Faster version of `apply_mask()` which operates on 8-byte blocks.
#[inline] #[inline]
#[cfg_attr(feature = "cargo-clippy", allow(cast_lossless))] #[cfg_attr(feature = "cargo-clippy", allow(clippy::cast_lossless))]
pub(crate) fn apply_mask(buf: &mut [u8], mask_u32: u32) { pub(crate) fn apply_mask(buf: &mut [u8], mask_u32: u32) {
// Extend the mask to 64 bits // Extend the mask to 64 bits
let mut mask_u64 = ((mask_u32 as u64) << 32) | (mask_u32 as u64); let mut mask_u64 = ((mask_u32 as u64) << 32) | (mask_u32 as u64);
@@ -50,7 +50,10 @@ pub(crate) fn apply_mask(buf: &mut [u8], mask_u32: u32) {
// TODO: copy_nonoverlapping here compiles to call memcpy. While it is not so // TODO: copy_nonoverlapping here compiles to call memcpy. While it is not so
// inefficient, it could be done better. The compiler does not understand that // inefficient, it could be done better. The compiler does not understand that
// a `ShortSlice` must be smaller than a u64. // a `ShortSlice` must be smaller than a u64.
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))] #[cfg_attr(
feature = "cargo-clippy",
allow(clippy::needless_pass_by_value)
)]
fn xor_short(buf: ShortSlice, mask: u64) { fn xor_short(buf: ShortSlice, mask: u64) {
// Unsafe: we know that a `ShortSlice` fits in a u64 // Unsafe: we know that a `ShortSlice` fits in a u64
unsafe { unsafe {

BIN
tests/identity.pfx Normal file

Binary file not shown.

View File

@@ -8,7 +8,8 @@ extern crate rand;
#[cfg(all(unix, feature = "uds"))] #[cfg(all(unix, feature = "uds"))]
extern crate tokio_uds; extern crate tokio_uds;
use std::io::Read; use std::io::{Read, Write};
use std::{net, thread};
use bytes::Bytes; use bytes::Bytes;
use flate2::read::GzDecoder; use flate2::read::GzDecoder;
@@ -66,6 +67,16 @@ fn test_simple() {
assert_eq!(bytes, Bytes::from_static(STR.as_ref())); assert_eq!(bytes, Bytes::from_static(STR.as_ref()));
} }
#[test]
fn test_connection_close() {
let mut srv =
test::TestServer::new(|app| app.handler(|_| HttpResponse::Ok().body(STR)));
let request = srv.get().header("Connection", "close").finish().unwrap();
let response = srv.execute(request.send()).unwrap();
assert!(response.status().is_success());
}
#[test] #[test]
fn test_with_query_parameter() { fn test_with_query_parameter() {
let mut srv = test::TestServer::new(|app| { let mut srv = test::TestServer::new(|app| {
@@ -396,24 +407,29 @@ fn test_client_cookie_handling() {
let cookie2 = cookie2b.clone(); let cookie2 = cookie2b.clone();
app.handler(move |req: &HttpRequest| { app.handler(move |req: &HttpRequest| {
// Check cookies were sent correctly // Check cookies were sent correctly
req.cookie("cookie1").ok_or_else(err) req.cookie("cookie1")
.and_then(|c1| if c1.value() == "value1" { .ok_or_else(err)
.and_then(|c1| {
if c1.value() == "value1" {
Ok(()) Ok(())
} else { } else {
Err(err()) Err(err())
}) }
.and_then(|()| req.cookie("cookie2").ok_or_else(err)) }).and_then(|()| req.cookie("cookie2").ok_or_else(err))
.and_then(|c2| if c2.value() == "value2" { .and_then(|c2| {
if c2.value() == "value2" {
Ok(()) Ok(())
} else { } else {
Err(err()) Err(err())
}) }
// Send some cookies back })
.map(|_| HttpResponse::Ok() // Send some cookies back
.cookie(cookie1.clone()) .map(|_| {
.cookie(cookie2.clone()) HttpResponse::Ok()
.finish() .cookie(cookie1.clone())
) .cookie(cookie2.clone())
.finish()
})
}) })
}); });
@@ -460,3 +476,33 @@ fn test_default_headers() {
"\"" "\""
))); )));
} }
#[test]
fn client_read_until_eof() {
    // Server replies without a Content-Length header and closes the socket;
    // the client must read the body until EOF.
    let addr = test::TestServer::unused_addr();
    // Minimal raw TCP "server": read (and discard) the request bytes, then
    // write a fixed HTTP/1.1 response with `connection: close` and no
    // Content-Length header.
    thread::spawn(move || {
        let lst = net::TcpListener::bind(addr).unwrap();
        for stream in lst.incoming() {
            let mut stream = stream.unwrap();
            let mut b = [0; 1000];
            let _ = stream.read(&mut b).unwrap();
            let _ = stream
                .write_all(b"HTTP/1.1 200 OK\r\nconnection: close\r\n\r\nwelcome!");
        }
    });
    let mut sys = actix::System::new("test");
    // client request
    let req = client::ClientRequest::get(format!("http://{}/", addr).as_str())
        .finish()
        .unwrap();
    let response = sys.block_on(req.send()).unwrap();
    assert!(response.status().is_success());
    // read response: body is everything received up to connection close
    let bytes = sys.block_on(response.body()).unwrap();
    assert_eq!(bytes, Bytes::from_static(b"welcome!"));
}

View File

@@ -0,0 +1,81 @@
extern crate actix;
extern crate actix_net;
extern crate actix_web;
use std::{thread, time};
use actix::System;
use actix_net::server::Server;
use actix_net::service::NewServiceExt;
use actix_web::server::{HttpService, KeepAlive, ServiceConfig, StreamConfiguration};
use actix_web::{client, http, test, App, HttpRequest};
#[test]
fn test_custom_pipeline() {
    // Assemble an HTTP service by hand (ServiceConfig + StreamConfiguration +
    // HttpService) on a plain actix-net Server, bypassing the HttpServer
    // wrapper, and verify a simple GET is served.
    let addr = test::TestServer::unused_addr();
    thread::spawn(move || {
        Server::new()
            .bind("test", addr, move || {
                let app = App::new()
                    .route("/", http::Method::GET, |_: HttpRequest| "OK")
                    .finish();
                let settings = ServiceConfig::build(app)
                    .keep_alive(KeepAlive::Disabled)
                    .client_timeout(1000)
                    .client_shutdown(1000)
                    .server_hostname("localhost")
                    .server_address(addr)
                    .finish();
                // Stream-level socket options applied before the HTTP service.
                StreamConfiguration::new()
                    .nodelay(true)
                    .tcp_keepalive(Some(time::Duration::from_secs(10)))
                    .and_then(HttpService::new(settings))
            }).unwrap()
            .run();
    });
    let mut sys = System::new("test");
    {
        let req = client::ClientRequest::get(format!("http://{}/", addr).as_str())
            .finish()
            .unwrap();
        let response = sys.block_on(req.send()).unwrap();
        assert!(response.status().is_success());
    }
}
#[test]
fn test_h1() {
    use actix_web::server::H1Service;
    // Run the HTTP/1-only service (H1Service) directly on an actix-net Server
    // and verify a simple GET is served.
    let addr = test::TestServer::unused_addr();
    thread::spawn(move || {
        Server::new()
            .bind("test", addr, move || {
                let app = App::new()
                    .route("/", http::Method::GET, |_: HttpRequest| "OK")
                    .finish();
                let settings = ServiceConfig::build(app)
                    .keep_alive(KeepAlive::Disabled)
                    .client_timeout(1000)
                    .client_shutdown(1000)
                    .server_hostname("localhost")
                    .server_address(addr)
                    .finish();
                H1Service::new(settings)
            }).unwrap()
            .run();
    });
    let mut sys = System::new("test");
    {
        let req = client::ClientRequest::get(format!("http://{}/", addr).as_str())
            .finish()
            .unwrap();
        let response = sys.block_on(req.send()).unwrap();
        assert!(response.status().is_success());
    }
}

View File

@@ -1,4 +1,5 @@
extern crate actix; extern crate actix;
extern crate actix_net;
extern crate actix_web; extern crate actix_web;
#[cfg(feature = "brotli")] #[cfg(feature = "brotli")]
extern crate brotli2; extern crate brotli2;
@@ -9,9 +10,18 @@ extern crate h2;
extern crate http as modhttp; extern crate http as modhttp;
extern crate rand; extern crate rand;
extern crate tokio; extern crate tokio;
extern crate tokio_current_thread;
extern crate tokio_current_thread as current_thread;
extern crate tokio_reactor; extern crate tokio_reactor;
extern crate tokio_tcp; extern crate tokio_tcp;
#[cfg(feature = "tls")]
extern crate native_tls;
#[cfg(feature = "ssl")]
extern crate openssl;
#[cfg(feature = "rust-tls")]
extern crate rustls;
use std::io::{Read, Write}; use std::io::{Read, Write};
use std::sync::Arc; use std::sync::Arc;
use std::{thread, time}; use std::{thread, time};
@@ -28,8 +38,8 @@ use h2::client as h2client;
use modhttp::Request; use modhttp::Request;
use rand::distributions::Alphanumeric; use rand::distributions::Alphanumeric;
use rand::Rng; use rand::Rng;
use tokio::executor::current_thread;
use tokio::runtime::current_thread::Runtime; use tokio::runtime::current_thread::Runtime;
use tokio_current_thread::spawn;
use tokio_tcp::TcpStream; use tokio_tcp::TcpStream;
use actix_web::*; use actix_web::*;
@@ -883,6 +893,209 @@ fn test_brotli_encoding_large() {
assert_eq!(bytes, Bytes::from(data)); assert_eq!(bytes, Bytes::from(data));
} }
#[cfg(all(feature = "brotli", feature = "ssl"))]
#[test]
fn test_brotli_encoding_large_ssl() {
    // Send a large brotli-compressed request body over TLS (openssl on both
    // ends) and verify the server echoes the decompressed payload back.
    use actix::{Actor, System};
    use openssl::ssl::{
        SslAcceptor, SslConnector, SslFiletype, SslMethod, SslVerifyMode,
    };
    // load ssl keys
    let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
    builder
        .set_private_key_file("tests/key.pem", SslFiletype::PEM)
        .unwrap();
    builder
        .set_certificate_chain_file("tests/cert.pem")
        .unwrap();
    let data = STR.repeat(10);
    // server: echoes the (already decoded) request body unchanged
    let srv = test::TestServer::build().ssl(builder).start(|app| {
        app.handler(|req: &HttpRequest| {
            req.body()
                .and_then(|bytes: Bytes| {
                    Ok(HttpResponse::Ok()
                        .content_encoding(http::ContentEncoding::Identity)
                        .body(bytes))
                }).responder()
        })
    });
    let mut rt = System::new("test");
    // client connector: self-signed test cert, so disable verification
    let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
    builder.set_verify(SslVerifyMode::NONE);
    let conn = client::ClientConnector::with_connector(builder.build()).start();
    // body: brotli-compress the payload
    let mut e = BrotliEncoder::new(Vec::new(), 5);
    e.write_all(data.as_ref()).unwrap();
    let enc = e.finish().unwrap();
    // client request
    let request = client::ClientRequest::build()
        .uri(srv.url("/"))
        .method(http::Method::POST)
        .header(http::header::CONTENT_ENCODING, "br")
        .with_connector(conn)
        .body(enc)
        .unwrap();
    let response = rt.block_on(request.send()).unwrap();
    assert!(response.status().is_success());
    // read response: must equal the uncompressed payload
    let bytes = rt.block_on(response.body()).unwrap();
    assert_eq!(bytes, Bytes::from(data));
}
#[cfg(all(feature = "rust-tls", feature = "ssl"))]
#[test]
fn test_reading_deflate_encoding_large_random_ssl() {
    // Server terminates TLS with rustls while the client connects via openssl;
    // a large random deflate-compressed body must round-trip unchanged.
    use actix::{Actor, System};
    use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
    use rustls::internal::pemfile::{certs, rsa_private_keys};
    use rustls::{NoClientAuth, ServerConfig};
    use std::fs::File;
    use std::io::BufReader;
    // load ssl keys
    let mut config = ServerConfig::new(NoClientAuth::new());
    let cert_file = &mut BufReader::new(File::open("tests/cert.pem").unwrap());
    let key_file = &mut BufReader::new(File::open("tests/key.pem").unwrap());
    let cert_chain = certs(cert_file).unwrap();
    let mut keys = rsa_private_keys(key_file).unwrap();
    config.set_single_cert(cert_chain, keys.remove(0)).unwrap();
    let data = rand::thread_rng()
        .sample_iter(&Alphanumeric)
        .take(160_000)
        .collect::<String>();
    // server: echoes the (already decoded) request body unchanged
    let srv = test::TestServer::build().rustls(config).start(|app| {
        app.handler(|req: &HttpRequest| {
            req.body()
                .and_then(|bytes: Bytes| {
                    Ok(HttpResponse::Ok()
                        .content_encoding(http::ContentEncoding::Identity)
                        .body(bytes))
                }).responder()
        })
    });
    let mut rt = System::new("test");
    // client connector: self-signed test cert, so disable verification
    let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
    builder.set_verify(SslVerifyMode::NONE);
    let conn = client::ClientConnector::with_connector(builder.build()).start();
    // encode data
    let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
    e.write_all(data.as_ref()).unwrap();
    let enc = e.finish().unwrap();
    // client request
    let request = client::ClientRequest::build()
        .uri(srv.url("/"))
        .method(http::Method::POST)
        .header(http::header::CONTENT_ENCODING, "deflate")
        .with_connector(conn)
        .body(enc)
        .unwrap();
    let response = rt.block_on(request.send()).unwrap();
    assert!(response.status().is_success());
    // read response: must equal the original random payload
    let bytes = rt.block_on(response.body()).unwrap();
    assert_eq!(bytes.len(), data.len());
    assert_eq!(bytes, Bytes::from(data));
}
#[cfg(all(feature = "tls", feature = "ssl"))]
#[test]
fn test_reading_deflate_encoding_large_random_tls() {
    // Server terminates TLS with native-tls while the client connects via
    // openssl; a large random deflate-compressed body must round-trip
    // unchanged.
    //
    // Fix: the original also built an openssl `SslAcceptor` from
    // tests/key.pem / tests/cert.pem here, but only the native-tls `acceptor`
    // below is ever passed to `bind_tls` — that dead setup (and its unused
    // `SslAcceptor`/`SslFiletype` imports) has been removed.
    use native_tls::{Identity, TlsAcceptor};
    use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
    use std::fs::File;
    use std::sync::mpsc;
    use actix::{Actor, System};
    let (tx, rx) = mpsc::channel();
    // load the pkcs12 identity for the native-tls acceptor
    let mut file = File::open("tests/identity.pfx").unwrap();
    let mut identity = vec![];
    file.read_to_end(&mut identity).unwrap();
    let identity = Identity::from_pkcs12(&identity, "1").unwrap();
    let acceptor = TlsAcceptor::new(identity).unwrap();
    let data = rand::thread_rng()
        .sample_iter(&Alphanumeric)
        .take(160_000)
        .collect::<String>();
    let addr = test::TestServer::unused_addr();
    // run the server in its own thread/System; the System handle is sent back
    // so the test can stop it when done
    thread::spawn(move || {
        System::run(move || {
            server::new(|| {
                App::new().handler("/", |req: &HttpRequest| {
                    // echo the (already decoded) request body unchanged
                    req.body()
                        .and_then(|bytes: Bytes| {
                            Ok(HttpResponse::Ok()
                                .content_encoding(http::ContentEncoding::Identity)
                                .body(bytes))
                        }).responder()
                })
            }).bind_tls(addr, acceptor)
            .unwrap()
            .start();
            let _ = tx.send(System::current());
        });
    });
    let sys = rx.recv().unwrap();
    let mut rt = System::new("test");
    // client connector: self-signed test cert, so disable verification
    let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
    builder.set_verify(SslVerifyMode::NONE);
    let conn = client::ClientConnector::with_connector(builder.build()).start();
    // encode data
    let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
    e.write_all(data.as_ref()).unwrap();
    let enc = e.finish().unwrap();
    // client request
    let request = client::ClientRequest::build()
        .uri(format!("https://{}/", addr))
        .method(http::Method::POST)
        .header(http::header::CONTENT_ENCODING, "deflate")
        .with_connector(conn)
        .body(enc)
        .unwrap();
    let response = rt.block_on(request.send()).unwrap();
    assert!(response.status().is_success());
    // read response: must equal the original random payload
    let bytes = rt.block_on(response.body()).unwrap();
    assert_eq!(bytes.len(), data.len());
    assert_eq!(bytes, Bytes::from(data));
    let _ = sys.stop();
}
#[test] #[test]
fn test_h2() { fn test_h2() {
let srv = test::TestServer::new(|app| app.handler(|_| HttpResponse::Ok().body(STR))); let srv = test::TestServer::new(|app| app.handler(|_| HttpResponse::Ok().body(STR)));
@@ -904,7 +1117,7 @@ fn test_h2() {
let (response, _) = client.send_request(request, false).unwrap(); let (response, _) = client.send_request(request, false).unwrap();
// Spawn a task to run the conn... // Spawn a task to run the conn...
current_thread::spawn(h2.map_err(|e| println!("GOT ERR={:?}", e))); spawn(h2.map_err(|e| println!("GOT ERR={:?}", e)));
response.and_then(|response| { response.and_then(|response| {
assert_eq!(response.status(), http::StatusCode::OK); assert_eq!(response.status(), http::StatusCode::OK);
@@ -932,6 +1145,28 @@ fn test_application() {
assert!(response.status().is_success()); assert!(response.status().is_success());
} }
#[test]
fn test_default_404_handler_response() {
    // HEAD to "/" — the app lives under the "/app" prefix — must get the
    // default 404 response. Uses a raw TCP stream so the status line can be
    // inspected verbatim.
    let mut srv = test::TestServer::with_factory(|| {
        App::new()
            .prefix("/app")
            .resource("", |r| r.f(|_| HttpResponse::Ok()))
            .resource("/", |r| r.f(|_| HttpResponse::Ok()))
    });
    let addr = srv.addr();
    // 24 bytes covers exactly the "HTTP/1.1 404 Not Found\r\n" status line
    let mut buf = [0; 24];
    let request = TcpStream::connect(&addr)
        .and_then(|sock| {
            tokio::io::write_all(sock, "HEAD / HTTP/1.1\r\nHost: localhost\r\n\r\n")
                .and_then(|(sock, _)| tokio::io::read_exact(sock, &mut buf))
                .and_then(|(_, buf)| Ok(buf))
        }).map_err(|e| panic!("{:?}", e));
    let response = srv.execute(request).unwrap();
    let rep = String::from_utf8_lossy(&response[..]);
    assert!(rep.contains("HTTP/1.1 404 Not Found"));
}
#[test] #[test]
fn test_server_cookies() { fn test_server_cookies() {
use actix_web::http; use actix_web::http;
@@ -986,3 +1221,180 @@ fn test_server_cookies() {
assert_eq!(cookies[1], first_cookie); assert_eq!(cookies[1], first_cookie);
} }
} }
#[test]
fn test_slow_request() {
    // With client_timeout(200) the server must answer 408 Request Timeout,
    // both for a connection that sends nothing and for one that sends only a
    // partial request.
    use actix::System;
    use std::net;
    use std::sync::mpsc;
    let (tx, rx) = mpsc::channel();
    let addr = test::TestServer::unused_addr();
    thread::spawn(move || {
        System::run(move || {
            let srv = server::new(|| {
                vec![App::new().resource("/", |r| {
                    r.method(http::Method::GET).f(|_| HttpResponse::Ok())
                })]
            });
            let srv = srv.bind(addr).unwrap();
            srv.client_timeout(200).start();
            let _ = tx.send(System::current());
        });
    });
    let sys = rx.recv().unwrap();
    // give the server thread time to start accepting connections
    thread::sleep(time::Duration::from_millis(200));
    // connect and send nothing: expect 408 once the timeout fires
    let mut stream = net::TcpStream::connect(addr).unwrap();
    let mut data = String::new();
    let _ = stream.read_to_string(&mut data);
    assert!(data.starts_with("HTTP/1.1 408 Request Timeout"));
    // send only an incomplete request (no terminating blank line): also 408
    let mut stream = net::TcpStream::connect(addr).unwrap();
    let _ = stream.write_all(b"GET /test/tests/test HTTP/1.1\r\n");
    let mut data = String::new();
    let _ = stream.read_to_string(&mut data);
    assert!(data.starts_with("HTTP/1.1 408 Request Timeout"));
    sys.stop();
}
#[test]
fn test_malformed_request() {
    // A request line with an invalid version token ("HTTP1.1" instead of
    // "HTTP/1.1") must be rejected with 400 Bad Request.
    use actix::System;
    use std::net;
    use std::sync::mpsc;
    let (tx, rx) = mpsc::channel();
    let addr = test::TestServer::unused_addr();
    thread::spawn(move || {
        System::run(move || {
            let srv = server::new(|| {
                App::new().resource("/", |r| {
                    r.method(http::Method::GET).f(|_| HttpResponse::Ok())
                })
            });
            let _ = srv.bind(addr).unwrap().start();
            let _ = tx.send(System::current());
        });
    });
    let sys = rx.recv().unwrap();
    // give the server thread time to start accepting connections
    thread::sleep(time::Duration::from_millis(200));
    let mut stream = net::TcpStream::connect(addr).unwrap();
    let _ = stream.write_all(b"GET /test/tests/test HTTP1.1\r\n");
    let mut data = String::new();
    let _ = stream.read_to_string(&mut data);
    assert!(data.starts_with("HTTP/1.1 400 Bad Request"));
    sys.stop();
}
#[test]
fn test_app_404() {
    // App mounted under "/prefix": the prefixed path resolves, while the bare
    // root must fall through to the default 404 handler.
    let mut srv = test::TestServer::with_factory(|| {
        App::new().prefix("/prefix").resource("/", |r| {
            r.method(http::Method::GET).f(|_| HttpResponse::Ok())
        })
    });
    let found = {
        let req = srv.client(http::Method::GET, "/prefix/").finish().unwrap();
        srv.execute(req.send()).unwrap()
    };
    assert!(found.status().is_success());
    let missing = {
        let req = srv.client(http::Method::GET, "/").finish().unwrap();
        srv.execute(req.send()).unwrap()
    };
    assert_eq!(missing.status(), http::StatusCode::NOT_FOUND);
}
#[test]
#[cfg(feature = "ssl")]
fn test_ssl_handshake_timeout() {
    // Connect to a TLS port with a plain TCP socket and never start the
    // handshake: with client_timeout(200) the server must simply drop the
    // connection without writing any bytes back.
    use actix::System;
    use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
    use std::net;
    use std::sync::mpsc;
    let (tx, rx) = mpsc::channel();
    let addr = test::TestServer::unused_addr();
    // load ssl keys
    let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
    builder
        .set_private_key_file("tests/key.pem", SslFiletype::PEM)
        .unwrap();
    builder
        .set_certificate_chain_file("tests/cert.pem")
        .unwrap();
    thread::spawn(move || {
        System::run(move || {
            let srv = server::new(|| {
                App::new().resource("/", |r| {
                    r.method(http::Method::GET).f(|_| HttpResponse::Ok())
                })
            });
            srv.bind_ssl(addr, builder)
                .unwrap()
                .workers(1)
                .client_timeout(200)
                .start();
            let _ = tx.send(System::current());
        });
    });
    let sys = rx.recv().unwrap();
    let mut stream = net::TcpStream::connect(addr).unwrap();
    let mut data = String::new();
    let _ = stream.read_to_string(&mut data);
    // connection was closed without any response bytes
    assert!(data.is_empty());
    let _ = sys.stop();
}
#[test]
fn test_content_length() {
    use actix_web::http::header::{HeaderName, HeaderValue};
    use http::StatusCode;
    // Statuses 204 and 1xx (indices 0..4) must be sent without a
    // Content-Length header; ordinary statuses (200, 404) on an empty body
    // get `content-length: 0`.
    let mut srv = test::TestServer::new(move |app| {
        app.resource("/{status}", |r| {
            r.f(|req: &HttpRequest| {
                let statuses = [
                    StatusCode::NO_CONTENT,
                    StatusCode::CONTINUE,
                    StatusCode::SWITCHING_PROTOCOLS,
                    StatusCode::PROCESSING,
                    StatusCode::OK,
                    StatusCode::NOT_FOUND,
                ];
                // path segment selects which status to respond with
                let idx: usize =
                    req.match_info().get("status").unwrap().parse().unwrap();
                HttpResponse::new(statuses[idx])
            })
        });
    });
    let addr = srv.addr();
    let header = HeaderName::from_static("content-length");
    let zero = HeaderValue::from_static("0");
    for i in 0..6 {
        let req = srv
            .get()
            .uri(format!("http://{}/{}", addr, i))
            .finish()
            .unwrap();
        let response = srv.execute(req.send()).unwrap();
        let expected = if i < 4 { None } else { Some(&zero) };
        assert_eq!(response.headers().get(&header), expected);
    }
}

View File

@@ -5,12 +5,16 @@ extern crate futures;
extern crate http; extern crate http;
extern crate rand; extern crate rand;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::{thread, time};
use bytes::Bytes; use bytes::Bytes;
use futures::Stream; use futures::Stream;
use rand::distributions::Alphanumeric; use rand::distributions::Alphanumeric;
use rand::Rng; use rand::Rng;
#[cfg(feature = "alpn")] #[cfg(feature = "ssl")]
extern crate openssl; extern crate openssl;
#[cfg(feature = "rust-tls")] #[cfg(feature = "rust-tls")]
extern crate rustls; extern crate rustls;
@@ -278,9 +282,8 @@ fn test_server_send_bin() {
} }
#[test] #[test]
#[cfg(feature = "alpn")] #[cfg(feature = "ssl")]
fn test_ws_server_ssl() { fn test_ws_server_ssl() {
extern crate openssl;
use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod}; use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
// load ssl keys // load ssl keys
@@ -316,7 +319,6 @@ fn test_ws_server_ssl() {
#[test] #[test]
#[cfg(feature = "rust-tls")] #[cfg(feature = "rust-tls")]
fn test_ws_server_rust_tls() { fn test_ws_server_rust_tls() {
extern crate rustls;
use rustls::internal::pemfile::{certs, rsa_private_keys}; use rustls::internal::pemfile::{certs, rsa_private_keys};
use rustls::{NoClientAuth, ServerConfig}; use rustls::{NoClientAuth, ServerConfig};
use std::fs::File; use std::fs::File;
@@ -351,3 +353,42 @@ fn test_ws_server_rust_tls() {
assert_eq!(item, data); assert_eq!(item, data);
} }
} }
/// Websocket actor that echoes text frames and records, via the shared
/// `AtomicUsize`, how many times it has been stopped.
struct WsStopped(Arc<AtomicUsize>);
impl Actor for WsStopped {
    type Context = ws::WebsocketContext<Self>;
    // Bump the shared counter so the test can observe actor shutdown.
    fn stopped(&mut self, _: &mut Self::Context) {
        self.0.fetch_add(1, Ordering::Relaxed);
    }
}
impl StreamHandler<ws::Message, ws::ProtocolError> for WsStopped {
    // Echo every text frame back to the peer; ignore all other frame types.
    fn handle(&mut self, msg: ws::Message, ctx: &mut Self::Context) {
        if let ws::Message::Text(text) = msg {
            ctx.text(text)
        }
    }
}
#[test]
fn test_ws_stopped() {
    // Dropping the client side of a websocket must stop the server-side actor;
    // the shared counter incremented in `WsStopped::stopped` proves it ran.
    let num = Arc::new(AtomicUsize::new(0));
    let num2 = num.clone();
    let mut srv = test::TestServer::new(move |app| {
        let num3 = num2.clone();
        app.handler(move |req| ws::start(req, WsStopped(num3.clone())))
    });
    {
        // do one echo round-trip, then drop reader/writer to close the socket
        let (reader, mut writer) = srv.ws().unwrap();
        writer.text("text");
        let (item, _) = srv.execute(reader.into_future()).unwrap();
        assert_eq!(item, Some(ws::Message::Text("text".to_owned())));
    }
    // allow the server actor to observe the close and run `stopped`
    thread::sleep(time::Duration::from_millis(1000));
    assert_eq!(num.load(Ordering::Relaxed), 1);
}