mirror of https://github.com/fafhrd91/actix-web synced 2025-07-23 17:28:19 +02:00

Compare commits: v0.7.6 ... 0.7 (156 commits)

Author SHA1 Message Date
Zeyi Fan
6813ce789d add x509 to stream extension (#844) 2019-05-15 08:38:59 -07:00
Nikolay Kim
cc6e0c6d04 Fix client payload decompression #674 2019-03-28 20:40:25 -07:00
Zeyi Fan
d9496d46d1 [0.7] Fix never-ending HTTP2 empty response (#737)
* Fix never-ending HTTP2 empty response #737
2019-03-28 17:40:12 -07:00
Jannik Keye
bf8262196f feat: enable use of patch as request method (#718) 2019-03-14 11:36:10 +03:00
Luca Bruno
17ecdd63d2 httpresponse: add constructor for HttpResponseBuilder (#697) 2019-03-13 17:20:18 +03:00
David McGuire
cc7f6b5eef Fix preflight CORS header compliance; refactor previous patch. (#717) 2019-03-11 07:26:54 +03:00
Stephen Ellis
ceca96da28 Added HTTP Authentication for Client (#540) 2019-03-06 12:56:12 +03:00
Douman
42f030d3f4 Ensure that Content-Length zero is specified in empty request 2019-03-05 08:37:15 +03:00
Hugo Benício
6d11ee683f fixing little typo in docs (#711) 2019-03-01 11:34:58 +03:00
Douman
80d4cbe301 Add change notes for new HttpResponseBuilder 2019-02-27 21:37:20 +03:00
Kornel
69d710dbce Add insert and remove() to response builder (#707) 2019-02-27 15:52:42 +03:00
Michael Edwards
0059a55dfb Fix typo 2019-02-13 14:31:28 +03:00
cuebyte
c695358bcb Ignored the If-Modified-Since if If-None-Match is specified (#680) (#692) 2019-02-09 00:33:00 +03:00
Jason Hills
b018e4abaf Fixes TestRequest::with_cookie panic 2019-02-07 07:55:27 +03:00
Vladislav Stepanov
346d85a884 Serve static file directly instead of redirecting (#676) 2019-02-04 13:20:46 +03:00
wildarch
9968afe4a6 Use NamedFile with an existing File (#670) 2019-01-28 08:07:28 +03:00
Tomas Izquierdo Garcia-Faria
f5bec968c7 Bump v_htmlescape version to 0.4 2019-01-25 11:31:42 +03:00
Neil Jensen
a534fdd125 Add io handling for ECONNRESET when data has already been received 2019-01-20 08:45:33 +03:00
rishflab
3431fff4d7 Fixed example in client documentation. This closes #665. 2019-01-14 07:44:30 +03:00
Sameer Puri
d6df2e3399 Fix HttpResponse doc spelling "os" to "of" 2019-01-11 08:45:15 +03:00
Douman
1fbb52ad3b 0.7.18 Bump 2019-01-10 17:05:18 +03:00
Julian Tescher
e5cdd22720 Fix test server listener thread leak (#655) 2019-01-08 10:42:22 -08:00
Douman
4f2e970732 Tidy up CHANGES.md 2019-01-08 10:49:03 +03:00
Douman
4d45313f9d Decode special characters when handling static files 2019-01-08 10:46:58 +03:00
Juan Aguilar
55a2a59906 Improve change askama_escape in favor of v_htmlescape (#651) 2019-01-03 22:34:18 +03:00
Ji Qu
61883042c2 Add with-cookie init-method for TestRequest (#647) 2019-01-02 13:24:08 +03:00
Douman
799c6eb719 0.7.17 Bump 2018-12-25 16:28:36 +03:00
Douman
037a1c6a24 Bump min version of rustc
Due to actix & trust-dns requirement
2018-12-24 21:17:09 +03:00
BlueC0re
bfdf762062 Only return a single Origin value (#644)
Only return a single origin if matched.
2018-12-24 21:16:07 +03:00
Nikolay Kim
477bf0d8ae Send HTTP/1.1 100 Continue if request contains expect: continue header #634 2018-12-23 10:19:12 -08:00
Phil Booth
e9fe3879df Support custom content types in JsonConfig 2018-12-23 08:27:47 +03:00
Douman
1a940d4c18 H1 decoded should ignore header cases 2018-12-16 18:34:32 +03:00
Douman
e8bdcb1c08 Update min version of http
Closes #630
2018-12-15 09:26:56 +03:00
Douman
46db09428c Prepare release 0.7.16 2018-12-11 21:04:05 +03:00
ethanpailes
90eef31cc0 impl ResponseError for SendError when possible (#619) 2018-12-11 19:37:52 +03:00
Akos Vandra
86af02156b add impl FromRequest for Either<A,B> (#618) 2018-12-10 19:02:05 +03:00
Douman
ac9fc662c6 Bump version to 0.7.15 2018-12-05 18:27:06 +03:00
Douman
0745a1a9f8 Remove usage of upcoming keyword async
AsyncResult::async is replaced with AsyncResult::future
2018-12-05 18:23:04 +03:00
silwol
b1635bc0e6 Update some dependencies (#612)
* Update rand to 0.6

* Update parking_lot to 0.7

* Update env_logger to 0.6
2018-12-04 09:58:22 +03:00
Kelly Thomas Kline
08c7743bb8 Add set_mailbox_capacity() function 2018-12-02 08:40:09 +03:00
vemoo
68c5d6e6d6 impl From<Cow<'static, [u8]>> for Binary (#611)
impl `From` for `Cow<'static, [u8]>`  and `From<Cow<'static, str>>` for `Binary`
2018-12-02 08:32:55 +03:00
François
c386353337 decode reserved characters when extracting path with configuration (#577)
* decode reserved characters when extracting path with configuration

* remove useless clone

* add a method to get decoded parameter by name
2018-11-24 16:54:11 +03:00
Douman
9aab382ea8 Allow user to provide addr to custom resolver
We basically swaps Addr with Recipient to enable user to use custom resolver
2018-11-23 15:36:12 +03:00
Douman
389cb13cd6 Export PathConfig and QueryConfig
Closes #597
2018-11-20 23:06:38 +03:00
Huston Bokinsky
6a93178479 Complete error helper functions. 2018-11-20 08:07:46 +03:00
Nikolay Kim
cd9901c928 prepare release 2018-11-14 16:24:01 -08:00
Nikolay Kim
1ef0eed0bd do not stop on keep-alive timer if sink is not completly flushed 2018-11-08 20:46:13 -08:00
Nikolay Kim
61b1030882 Fix websockets connection drop if request contains content-length header #567 2018-11-08 20:35:47 -08:00
Nikolay Kim
7065c540e1 set nodelay on socket #560 2018-11-08 16:29:43 -08:00
Nikolay Kim
aed3933ae8 Merge branch 'master' of github.com:actix/actix-web 2018-11-08 16:15:45 -08:00
Nikolay Kim
5b7740dee3 hide ChunkedReadFile 2018-11-08 16:12:16 -08:00
imaperson
1a0bf32ec7 Fix unnecessary owned string and change htmlescape in favor of askama_escape (#584) 2018-11-08 16:08:06 -08:00
Nikolay Kim
9ab586e24e update actix-net dep 2018-11-08 16:06:23 -08:00
Nikolay Kim
62f1c90c8d update base64 dep 2018-11-07 21:18:40 -08:00
Nikolay Kim
2677d325a7 fix keep-alive timer reset 2018-11-07 21:09:33 -08:00
Julian Tescher
8e354021d4 Add SameSite option to identity middleware cookie (#581) 2018-11-07 23:24:06 +03:00
Stanislav Tkach
3b536ee96c Use old clippy attributes syntax (#562) 2018-11-01 11:14:48 +03:00
Nikolay Kim
cfd9a56ff7 Add async/await ref 2018-10-28 09:24:19 -07:00
Douman
5f91f5eda6 Correct IoStream::set_keepalive for UDS (#564)
Enable uds feature in tests
2018-10-26 10:59:06 +03:00
François
42d5d48e71 add a way to configure error treatment for Query and Path extractors (#550)
* add a way to configure error treatment for Query extractor

* allow error handler to be customized for Path extractor
2018-10-20 06:43:43 +03:00
Douman
960274ada8 Refactoring of server output to not exclude HTTP_10 (#552) 2018-10-19 07:52:10 +03:00
ivan-ochc
f383f618b5 Fix typo in error message (#554) 2018-10-18 21:27:31 +03:00
Nikolay Kim
c04b4678f1 bump version 2018-10-14 08:10:41 -07:00
Nikolay Kim
dd948f836e HttpServer not sending streamed request body on HTTP/2 requests #544 2018-10-14 08:08:12 -07:00
Douman
63a443fce0 Correct build script 2018-10-13 10:05:21 +03:00
Douman
d145136e56 Add individual check for TLS features 2018-10-13 09:54:03 +03:00
jeizsm
32145cf6c3 fix after update tokio-rustls (#542) 2018-10-11 11:05:07 +03:00
Nikolay Kim
ec8aef6b43 update dep versions 2018-10-10 08:36:16 -07:00
Nikolay Kim
f45038bbfe remove unused code 2018-10-09 13:23:37 -07:00
Nikolay Kim
c63838bb71 fix 204 support for http/2 2018-10-09 13:12:49 -07:00
Nikolay Kim
4d17a9afcc update version 2018-10-09 11:42:52 -07:00
Nikolay Kim
65e9201b4d Fixed panic during graceful shutdown 2018-10-09 11:35:57 -07:00
Nikolay Kim
c3ad516f56 disable shutdown atm 2018-10-09 09:45:24 -07:00
Nikolay Kim
93b1c5fd46 update deps 2018-10-08 21:58:37 -07:00
Nikolay Kim
4e7fac08b9 do not override content-length header 2018-10-08 15:30:59 -07:00
Nikolay Kim
07f6ca4b71 Merge branch 'master' of github.com:actix/actix-web 2018-10-08 13:06:49 -07:00
Nikolay Kim
03d988b898 refactor date rendering 2018-10-08 10:16:19 -07:00
Nikolay Kim
cfad5bf1f3 enable slow request timeout for h2 dispatcher 2018-10-08 07:47:42 -07:00
Danil Berestov
10678a22af test content length (#532) 2018-10-06 08:17:20 +03:00
lzx
7ae5a43877 httpresponse.rs doc fix (#534) 2018-10-06 08:16:12 +03:00
Nikolay Kim
1e1a4f846e use actix-net cell features 2018-10-02 22:23:51 -07:00
Nikolay Kim
49eea3bf76 travis config 2018-10-02 20:22:51 -07:00
Nikolay Kim
b0677aa029 fix stable compatibility 2018-10-02 19:42:24 -07:00
Nikolay Kim
401ea574c0 make AcceptorTimeout::new public 2018-10-02 19:31:30 -07:00
Nikolay Kim
bbcd618304 export AcceptorTimeout 2018-10-02 19:12:08 -07:00
Nikolay Kim
1f68ce8541 fix tests 2018-10-02 19:05:58 -07:00
Nikolay Kim
2710f70e39 add H1 transport 2018-10-02 17:30:29 -07:00
Nikolay Kim
ae5c4dfb78 refactor http channels list; rename WorkerSettings 2018-10-02 15:25:32 -07:00
Nikolay Kim
d7379bd10b update server ssl tests; upgrade rustls 2018-10-02 13:41:33 -07:00
Nikolay Kim
b59712c439 add ssl handshake timeout tests 2018-10-02 11:32:43 -07:00
Nikolay Kim
724668910b fix ssh handshake timeout 2018-10-02 11:18:59 -07:00
Nikolay Kim
61c7534e03 fix stream flushing 2018-10-02 10:43:23 -07:00
Douman
f8b176de9e Fix no_http2 flag in HttpServer (#526) 2018-10-02 20:09:31 +03:00
Danil Berestov
c8505bb53f content-length bug fix (#525)
* content-length bug fix

* changes.md is updated

* typo
2018-10-02 09:15:48 -07:00
Nikolay Kim
eed377e773 uneeded dep 2018-10-02 00:20:27 -07:00
Nikolay Kim
f3ce6574e4 fix client timer and add slow request tests 2018-10-02 00:19:28 -07:00
Nikolay Kim
f007860a16 cleanup warnings 2018-10-01 22:48:11 -07:00
Nikolay Kim
fdfadb52e1 fix doc test for State 2018-10-01 22:29:30 -07:00
Nikolay Kim
368f73513a set tcp-keepalive for test as well 2018-10-01 22:25:53 -07:00
Nikolay Kim
c674ea9126 add StreamConfiguration service 2018-10-01 22:23:02 -07:00
Nikolay Kim
7c78797d9b proper stop for test_ws_stopped test 2018-10-01 21:30:00 -07:00
Nikolay Kim
84edc57fd9 increase sleep time 2018-10-01 21:19:27 -07:00
Nikolay Kim
127af92541 clippy warnings 2018-10-01 21:16:56 -07:00
Nikolay Kim
e4686f6c8d set socket linger to 0 on timeout 2018-10-01 20:53:22 -07:00
Nikolay Kim
1bac65de4c add websocket stopped test 2018-10-01 20:15:26 -07:00
Nikolay Kim
16945a554a add client shutdown timeout 2018-10-01 20:04:16 -07:00
Nikolay Kim
91af3ca148 simplify h1 dispatcher 2018-10-01 19:18:24 -07:00
Nikolay Kim
2217a152cb expose app error by http service 2018-10-01 15:19:49 -07:00
Nikolay Kim
c1e0b4f322 expose internal http server types and allow to create custom http pipelines 2018-10-01 14:43:06 -07:00
Nikolay Kim
5966ee6192 add HttpServer::register() function, allows to register services in actix net server 2018-09-28 16:03:53 -07:00
Nikolay Kim
4aac3d6a92 refactor keep-alive timer 2018-09-28 15:04:59 -07:00
Nikolay Kim
e95babf8d3 log acctor init errors 2018-09-28 12:37:20 -07:00
Nikolay Kim
f2d42e5e77 refactor acceptor error handling 2018-09-28 11:50:47 -07:00
Nikolay Kim
0f1c80ccc6 deprecate start_incoming 2018-09-28 08:45:49 -07:00
Nikolay Kim
fc5088b55e fix tarpaulin args 2018-09-28 00:08:23 -07:00
Nikolay Kim
bec37fdbd5 update travis config 2018-09-27 22:23:29 -07:00
Nikolay Kim
4b59ae2476 fix ssl config for client connector 2018-09-27 22:15:38 -07:00
Nikolay Kim
d0fc9d7b99 simplify listen_ and bind_ methods 2018-09-27 21:55:44 -07:00
Nikolay Kim
1ff86e5ac4 restore rust-tls support 2018-09-27 21:24:21 -07:00
Nikolay Kim
ecfda64f6d add native-tls support 2018-09-27 20:40:34 -07:00
Nikolay Kim
0bca21ec6d fix ssl tests 2018-09-27 19:57:40 -07:00
Nikolay Kim
3173c9fa83 diesable client timeout for tcp stream acceptor 2018-09-27 19:34:07 -07:00
Nikolay Kim
85445ea809 rename and simplify ServiceFactory trait 2018-09-27 18:33:29 -07:00
Nikolay Kim
d57579d700 refactor acceptor pipeline add client timeout 2018-09-27 18:33:29 -07:00
Nikolay Kim
b6a1cfa6ad update openssl support 2018-09-27 18:33:29 -07:00
Nikolay Kim
9f1417af30 refactor http service builder 2018-09-27 18:33:29 -07:00
Nikolay Kim
0aa0f326f7 fix changes from master 2018-09-27 18:33:29 -07:00
Nikolay Kim
dbb4fab4f7 separate mod for HttpHandler; add HttpHandler impl for Vec<H> 2018-09-27 18:33:29 -07:00
Nikolay Kim
6f3e70a92a simplify application factory 2018-09-27 18:33:29 -07:00
Nikolay Kim
a63d3f9a7a cleanup ServerFactory trait 2018-09-27 18:33:29 -07:00
Nikolay Kim
a3cfc24232 refactor acceptor service 2018-09-27 18:33:29 -07:00
Nikolay Kim
6a61138bf8 enable ssl feature 2018-09-27 18:33:29 -07:00
Nikolay Kim
7cf9af9b55 disable ssl for travis 2018-09-27 18:33:29 -07:00
Nikolay Kim
c9a52e3197 refactor date generatioin 2018-09-27 18:33:29 -07:00
Nikolay Kim
1907102685 switch to actix-net server 2018-09-27 18:33:29 -07:00
Nikolay Kim
52195bbf16 update version 2018-09-27 18:17:58 -07:00
sapir
59deb4b40d Try to separate HTTP/1 read & write disconnect handling, to fix #511. (#514) 2018-09-27 18:15:02 -07:00
Ashley
782eeb5ded Reduced unsafe converage (#520) 2018-09-26 11:56:34 +03:00
Douman
1b298142e3 Correct composing of multiple origins in cors (#518) 2018-09-21 08:45:22 +03:00
Douman
0dc96658f2 Send response to inform client of error (#515) 2018-09-21 07:24:10 +03:00
Nikolay Kim
f40153fca4 fix node::insert() method, missing next element 2018-09-17 11:39:03 -07:00
Nikolay Kim
764103566d update changes 2018-09-17 10:48:37 -07:00
Nikolay Kim
bfb2f2e9e1 fix node.remove(), update next node pointer 2018-09-17 10:25:45 -07:00
Nikolay Kim
599e6b3385 refactor channel node remove operation 2018-09-17 05:29:07 -07:00
Nikolay Kim
03e318f446 update changes 2018-09-15 17:10:53 -07:00
Nikolay Kim
7449884ce3 fix wrong error message for path deserialize for i32 #510 2018-09-15 17:09:07 -07:00
Nikolay Kim
bbe69e5b8d update version 2018-09-15 10:00:54 -07:00
Nikolay Kim
9d1eefc38f use 5 seconds keep-alive timer by default 2018-09-15 09:57:54 -07:00
Nikolay Kim
d65c72b44d use server keep-alive timer as slow request timer 2018-09-15 09:55:38 -07:00
Nikolay Kim
c3f8b5cf22 clippy warnings 2018-09-11 11:25:32 -07:00
Nikolay Kim
70a3f317d3 fix failing requests to test server #508 2018-09-11 11:24:05 -07:00
Nikolay Kim
513c8ec1ce Merge pull request #505 from Neopallium/master
Fix issue with HttpChannel linked list.
2018-09-11 11:18:33 -07:00
Robert G. Jakabosky
04608b2ea6 Update changes. 2018-09-12 00:27:15 +08:00
Robert G. Jakabosky
70b45659e2 Make Node's traverse method take a closure instead of calling shutdown on each HttpChannel. 2018-09-12 00:27:15 +08:00
Robert G. Jakabosky
e0ae6b10cd Fix bug with HttpChannel linked list. 2018-09-12 00:27:15 +08:00
Maciej Piechotka
003b05b095 Don't ignore errors in std::fmt::Debug implementations (#506) 2018-09-11 14:57:55 +03:00
77 changed files with 5323 additions and 3231 deletions

View File

@@ -1,6 +1,6 @@
environment:
global:
PROJECT_NAME: actix
PROJECT_NAME: actix-web
matrix:
# Stable channel
- TARGET: i686-pc-windows-msvc
@@ -37,4 +37,5 @@ build: false
# Equivalent to Travis' `script` phase
test_script:
- cargo clean
- cargo test --no-default-features --features="flate2-rust"

View File

@@ -30,14 +30,17 @@ before_script:
script:
- |
if [[ "$TRAVIS_RUST_VERSION" != "stable" ]]; then
if [[ "$TRAVIS_RUST_VERSION" != "nightly" ]]; then
cargo clean
cargo test --features="alpn,tls,rust-tls" -- --nocapture
cargo check --features rust-tls
cargo check --features ssl
cargo check --features tls
cargo test --features="ssl,tls,rust-tls,uds" -- --nocapture
fi
- |
if [[ "$TRAVIS_RUST_VERSION" == "stable" ]]; then
if [[ "$TRAVIS_RUST_VERSION" == "nightly" ]]; then
RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install -f cargo-tarpaulin
cargo tarpaulin --features="alpn,tls,rust-tls" --out Xml --no-count
RUST_BACKTRACE=1 cargo tarpaulin --features="ssl,tls,rust-tls" --out Xml
bash <(curl -s https://codecov.io/bash)
echo "Uploaded code coverage"
fi
@@ -45,8 +48,8 @@ script:
# Upload docs
after_success:
- |
if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "beta" ]]; then
cargo doc --features "alpn, tls, rust-tls, session" --no-deps &&
if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "stable" ]]; then
cargo doc --features "ssl,tls,rust-tls,session" --no-deps &&
echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html &&
git clone https://github.com/davisp/ghp-import.git &&
./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc &&

View File

@@ -1,5 +1,186 @@
# Changes
## [0.7.19] - 2019-03-29
### Added
* Add `from_file` and `from_file_with_config` to `NamedFile` to allow sending files without a known path. #670
* Add `insert` and `remove` methods to `HttpResponseBuilder`
* Add client HTTP Authentication methods `.basic_auth()` and `.bearer_auth()`. #540
* Add support for PATCH HTTP method
### Fixed
* Ignored the `If-Modified-Since` if `If-None-Match` is specified. #680
* Do not remove `Content-Length` on `Body::Empty` and insert zero value if it is missing for `POST` and `PUT` methods.
* Fix preflight CORS header compliance; refactor previous patch (#603). #717
* Fix never-ending HTTP2 request when response is empty (#709). #737
* Fix client payload decompression #674
## [0.7.18] - 2019-01-10
### Added
* Add `with_cookie` for `TestRequest` to allow users to customize request cookie. #647
* Add `cookie` method for `TestRequest` to allow users to add cookie dynamically.
### Fixed
* StaticFiles decodes special characters in the request's path
* Fix test server listener leak #654
## [0.7.17] - 2018-12-25
### Added
* Support for custom content types in `JsonConfig`. #637
* Send `HTTP/1.1 100 Continue` if request contains `expect: continue` header #634
### Fixed
* HTTP1 decoder should perform case-insensitive comparison for client requests (e.g. `Keep-Alive`). #631
* Access-Control-Allow-Origin header should only return a single, matching origin. #603
## [0.7.16] - 2018-12-11
### Added
* Implement `FromRequest` extractor for `Either<A,B>`
* Implement `ResponseError` for `SendError`
## [0.7.15] - 2018-12-05
### Changed
* `ClientConnector::resolver` now accepts `Into<Recipient>` instead of `Addr`. This enables users to implement their own resolver.
* `QueryConfig` and `PathConfig` are made public.
* `AsyncResult::async` is changed to `AsyncResult::future` as `async` is a reserved keyword in the 2018 edition.
### Added
* By default, the `Path` extractor now percent-decodes all characters. This behaviour can be disabled
with `PathConfig::default().disable_decoding()`.
## [0.7.14] - 2018-11-14
### Added
* Add method to configure custom error handler to `Query` and `Path` extractors.
* Add method to configure `SameSite` option in `CookieIdentityPolicy`.
* By default, the `Path` extractor now percent-decodes all characters. This behaviour can be disabled
with `PathConfig::default().disable_decoding()`.
### Fixed
* Fix websockets connection drop if request contains "content-length" header #567
* Fix keep-alive timer reset
* HttpServer now treats streaming bodies the same for HTTP/1.x protocols. #549
* Set nodelay for socket #560
## [0.7.13] - 2018-10-14
### Fixed
* Fixed rustls support
* HttpServer not sending streamed request body on HTTP/2 requests #544
## [0.7.12] - 2018-10-10
### Changed
* Set min version for actix
* Set min version for actix-net
## [0.7.11] - 2018-10-09
### Fixed
* Fixed 204 responses for http/2
## [0.7.10] - 2018-10-09
### Fixed
* Fixed panic during graceful shutdown
## [0.7.9] - 2018-10-09
### Added
* Added client shutdown timeout setting
* Added slow request timeout setting
* Respond with 408 response on slow request timeout #523
### Fixed
* HTTP1 decoding errors are reported to the client. #512
* Correctly compose multiple allowed origins in CORS. #517
* Websocket server finished() isn't called if client disconnects #511
* Responses with status codes 100, 101, 102, and 204 are sent without a Content-Length header. #521
* Correct usage of `no_http2` flag in `bind_*` methods. #519
## [0.7.8] - 2018-09-17
### Added
* Use server `Keep-Alive` setting as slow request timeout #439
### Changed
* Use 5 seconds keep-alive timer by default.
### Fixed
* Fixed wrong error message for i16 type #510
## [0.7.7] - 2018-09-11
### Fixed
* Fix linked list of HttpChannels #504
* Fix failing requests to TestServer #508
## [0.7.6] - 2018-09-07
### Fixed

View File

@@ -1,6 +1,6 @@
[package]
name = "actix-web"
version = "0.7.6"
version = "0.7.19"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust."
readme = "README.md"
@@ -17,7 +17,7 @@ exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
build = "build.rs"
[package.metadata.docs.rs]
features = ["tls", "alpn", "rust-tls", "session", "brotli", "flate2-c"]
features = ["tls", "ssl", "rust-tls", "session", "brotli", "flate2-c"]
[badges]
travis-ci = { repository = "actix/actix-web", branch = "master" }
@@ -29,16 +29,19 @@ name = "actix_web"
path = "src/lib.rs"
[features]
default = ["session", "brotli", "flate2-c"]
default = ["session", "brotli", "flate2-c", "cell"]
# tls
tls = ["native-tls", "tokio-tls"]
tls = ["native-tls", "tokio-tls", "actix-net/tls"]
# openssl
alpn = ["openssl", "tokio-openssl"]
ssl = ["openssl", "tokio-openssl", "actix-net/ssl"]
# deprecated, use "ssl"
alpn = ["openssl", "tokio-openssl", "actix-net/ssl"]
# rustls
rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots"]
rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots", "actix-net/rust-tls"]
# unix sockets
uds = ["tokio-uds"]
@@ -55,21 +58,25 @@ flate2-c = ["flate2/miniz-sys"]
# rust backend for flate2 crate
flate2-rust = ["flate2/rust_backend"]
[dependencies]
actix = "0.7.0"
cell = ["actix-net/cell"]
base64 = "0.9"
[dependencies]
actix = "0.7.9"
actix-net = "0.2.6"
v_htmlescape = "0.4"
base64 = "0.10"
bitflags = "1.0"
failure = "^0.1.2"
h2 = "0.1"
htmlescape = "0.3"
http = "^0.1.8"
http = "^0.1.14"
httparse = "1.3"
log = "0.4"
mime = "0.3"
mime_guess = "2.0.0-alpha"
num_cpus = "1.0"
percent-encoding = "1.0"
rand = "0.5"
rand = "0.6"
regex = "1.0"
serde = "1.0"
serde_json = "1.0"
@@ -80,14 +87,13 @@ encoding = "0.2"
language-tags = "0.2"
lazy_static = "1.0"
lazycell = "1.0.0"
parking_lot = "0.6"
parking_lot = "0.7"
serde_urlencoded = "^0.5.3"
url = { version="1.7", features=["query_encoding"] }
cookie = { version="0.11", features=["percent-encode"] }
brotli2 = { version="^0.3.2", optional = true }
flate2 = { version="^1.0.2", optional = true, default-features = false }
failure = "^0.1.2"
# io
mio = "^0.6.13"
net2 = "0.2"
@@ -99,7 +105,7 @@ slab = "0.4"
tokio = "0.1"
tokio-io = "0.1"
tokio-tcp = "0.1"
tokio-timer = "0.2"
tokio-timer = "0.2.8"
tokio-reactor = "0.1"
tokio-current-thread = "0.1"
@@ -112,18 +118,16 @@ openssl = { version="0.10", optional = true }
tokio-openssl = { version="0.2", optional = true }
#rustls
rustls = { version = "^0.13.1", optional = true }
tokio-rustls = { version = "^0.7.2", optional = true }
rustls = { version = "0.14", optional = true }
tokio-rustls = { version = "0.8", optional = true }
webpki = { version = "0.18", optional = true }
webpki-roots = { version = "0.15", optional = true }
# unix sockets
tokio-uds = { version="0.2", optional = true }
serde_urlencoded = "^0.5.3"
[dev-dependencies]
env_logger = "0.5"
env_logger = "0.6"
serde_derive = "1.0"
[build-dependencies]
@@ -133,8 +137,3 @@ version_check = "0.1"
lto = true
opt-level = 3
codegen-units = 1
[workspace]
members = [
"./",
]

View File

@@ -1,3 +1,33 @@
## 0.7.15
* The `' '` character is no longer percent-decoded before matching routes. If you need to use it in
your routes, you should use `%20`.
instead of
```rust
fn main() {
let app = App::new().resource("/my index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/my%20index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
* If you used `AsyncResult::async`, you need to replace it with `AsyncResult::future`.
## 0.7.4
* `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as tuple

View File

@@ -8,13 +8,13 @@ Actix web is a simple, pragmatic and extremely fast web framework for Rust.
* Client/server [WebSockets](https://actix.rs/docs/websockets/) support
* Transparent content compression/decompression (br, gzip, deflate)
* Configurable [request routing](https://actix.rs/docs/url-dispatch/)
* Graceful server shutdown
* Multipart streams
* Static assets
* SSL support with OpenSSL or `native-tls`
* Middlewares ([Logger, Session, CORS, CSRF, etc](https://actix.rs/docs/middleware/))
* Includes an asynchronous [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
* Built on top of [Actix actor framework](https://github.com/actix/actix)
* Experimental [Async/Await](https://github.com/mehcode/actix-web-async-await) support.
## Documentation & community resources
@@ -23,7 +23,7 @@ Actix web is a simple, pragmatic and extremely fast web framework for Rust.
* [API Documentation (Releases)](https://actix.rs/api/actix-web/stable/actix_web/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-web](https://crates.io/crates/actix-web)
* Minimum supported Rust version: 1.26 or later
* Minimum supported Rust version: 1.31 or later
## Example
@@ -51,7 +51,7 @@ fn main() {
* [Protobuf support](https://github.com/actix/examples/tree/master/protobuf/)
* [Multipart streams](https://github.com/actix/examples/tree/master/multipart/)
* [Simple websocket](https://github.com/actix/examples/tree/master/websocket/)
* [Tera](https://github.com/actix/examples/tree/master/template_tera/) /
* [Tera](https://github.com/actix/examples/tree/master/template_tera/) /
[Askama](https://github.com/actix/examples/tree/master/template_askama/) templates
* [Diesel integration](https://github.com/actix/examples/tree/master/diesel/)
* [r2d2](https://github.com/actix/examples/tree/master/r2d2/)
@@ -66,8 +66,6 @@ You may consider checking out
* [TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r16&hw=ph&test=plaintext)
* Some basic benchmarks could be found in this [repository](https://github.com/fafhrd91/benchmarks).
## License
This project is licensed under either of

View File

@@ -135,7 +135,7 @@ where
/// instance for each thread, thus application state must be constructed
/// multiple times. If you want to share state between different
/// threads, a shared object should be used, e.g. `Arc`. Application
/// state does not need to be `Send` and `Sync`.
/// state does not need to be `Send` or `Sync`.
pub fn with_state(state: S) -> App<S> {
App {
parts: Some(ApplicationParts {

View File

@@ -1,5 +1,6 @@
use bytes::{Bytes, BytesMut};
use futures::Stream;
use std::borrow::Cow;
use std::sync::Arc;
use std::{fmt, mem};
@@ -194,12 +195,30 @@ impl From<Vec<u8>> for Binary {
}
}
impl From<Cow<'static, [u8]>> for Binary {
fn from(b: Cow<'static, [u8]>) -> Binary {
match b {
Cow::Borrowed(s) => Binary::Slice(s),
Cow::Owned(vec) => Binary::Bytes(Bytes::from(vec)),
}
}
}
impl From<String> for Binary {
fn from(s: String) -> Binary {
Binary::Bytes(Bytes::from(s))
}
}
impl From<Cow<'static, str>> for Binary {
fn from(s: Cow<'static, str>) -> Binary {
match s {
Cow::Borrowed(s) => Binary::Slice(s.as_ref()),
Cow::Owned(s) => Binary::Bytes(Bytes::from(s)),
}
}
}
impl<'a> From<&'a String> for Binary {
fn from(s: &'a String) -> Binary {
Binary::Bytes(Bytes::from(AsRef::<[u8]>::as_ref(&s)))
@@ -287,6 +306,16 @@ mod tests {
assert_eq!(Binary::from("test").as_ref(), b"test");
}
#[test]
fn test_cow_str() {
let cow: Cow<'static, str> = Cow::Borrowed("test");
assert_eq!(Binary::from(cow.clone()).len(), 4);
assert_eq!(Binary::from(cow.clone()).as_ref(), b"test");
let cow: Cow<'static, str> = Cow::Owned("test".to_owned());
assert_eq!(Binary::from(cow.clone()).len(), 4);
assert_eq!(Binary::from(cow.clone()).as_ref(), b"test");
}
#[test]
fn test_static_bytes() {
assert_eq!(Binary::from(b"test".as_ref()).len(), 4);
@@ -307,6 +336,16 @@ mod tests {
assert_eq!(Binary::from(Bytes::from("test")).as_ref(), b"test");
}
#[test]
fn test_cow_bytes() {
let cow: Cow<'static, [u8]> = Cow::Borrowed(b"test");
assert_eq!(Binary::from(cow.clone()).len(), 4);
assert_eq!(Binary::from(cow.clone()).as_ref(), b"test");
let cow: Cow<'static, [u8]> = Cow::Owned(Vec::from("test"));
assert_eq!(Binary::from(cow.clone()).len(), 4);
assert_eq!(Binary::from(cow.clone()).as_ref(), b"test");
}
#[test]
fn test_arc_string() {
let b = Arc::new("test".to_owned());

View File

@@ -3,9 +3,9 @@ use std::net::Shutdown;
use std::time::{Duration, Instant};
use std::{fmt, io, mem, time};
use actix::resolver::{Connect as ResolveConnect, Resolver, ResolverError};
use actix::{
fut, Actor, ActorFuture, ActorResponse, Addr, AsyncContext, Context,
use actix_inner::actors::resolver::{Connect as ResolveConnect, Resolver, ResolverError};
use actix_inner::{
fut, Actor, ActorFuture, ActorResponse, AsyncContext, Context,
ContextFutureSpawner, Handler, Message, Recipient, StreamHandler, Supervised,
SystemService, WrapFuture,
};
@@ -16,13 +16,16 @@ use http::{Error as HttpError, HttpTryFrom, Uri};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_timer::Delay;
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
use {
openssl::ssl::{Error as SslError, SslConnector, SslMethod},
tokio_openssl::SslConnectorExt,
};
#[cfg(all(feature = "tls", not(feature = "alpn")))]
#[cfg(all(
feature = "tls",
not(any(feature = "alpn", feature = "ssl", feature = "rust-tls"))
))]
use {
native_tls::{Error as SslError, TlsConnector as NativeTlsConnector},
tokio_tls::TlsConnector as SslConnector,
@@ -30,20 +33,19 @@ use {
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
not(any(feature = "alpn", feature = "tls", feature = "ssl"))
))]
use {
rustls::ClientConfig, std::io::Error as SslError, std::sync::Arc,
tokio_rustls::ClientConfigExt, webpki::DNSNameRef, webpki_roots,
tokio_rustls::TlsConnector as SslConnector, webpki::DNSNameRef, webpki_roots,
};
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
))]
type SslConnector = Arc<ClientConfig>;
#[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))]
#[cfg(not(any(
feature = "alpn",
feature = "ssl",
feature = "tls",
feature = "rust-tls"
)))]
type SslConnector = ();
use server::IoStream;
@@ -150,7 +152,12 @@ pub enum ClientConnectorError {
SslIsNotSupported,
/// SSL error
#[cfg(any(feature = "tls", feature = "alpn", feature = "rust-tls"))]
#[cfg(any(
feature = "tls",
feature = "alpn",
feature = "ssl",
feature = "rust-tls",
))]
#[fail(display = "{}", _0)]
SslError(#[cause] SslError),
@@ -213,7 +220,7 @@ pub struct ClientConnector {
acq_tx: mpsc::UnboundedSender<AcquiredConnOperation>,
acq_rx: Option<mpsc::UnboundedReceiver<AcquiredConnOperation>>,
resolver: Option<Addr<Resolver>>,
resolver: Option<Recipient<ResolveConnect>>,
conn_lifetime: Duration,
conn_keep_alive: Duration,
limit: usize,
@@ -232,7 +239,7 @@ impl Actor for ClientConnector {
fn started(&mut self, ctx: &mut Self::Context) {
if self.resolver.is_none() {
self.resolver = Some(Resolver::from_registry())
self.resolver = Some(Resolver::from_registry().recipient())
}
self.collect_periodic(ctx);
ctx.add_stream(self.acq_rx.take().unwrap());
@@ -247,40 +254,46 @@ impl SystemService for ClientConnector {}
impl Default for ClientConnector {
fn default() -> ClientConnector {
let connector = {
#[cfg(all(feature = "alpn"))]
#[cfg(all(any(feature = "alpn", feature = "ssl")))]
{
SslConnector::builder(SslMethod::tls()).unwrap().build()
}
#[cfg(all(feature = "tls", not(feature = "alpn")))]
#[cfg(all(
feature = "tls",
not(any(feature = "alpn", feature = "ssl", feature = "rust-tls"))
))]
{
NativeTlsConnector::builder().build().unwrap().into()
}
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
not(any(feature = "alpn", feature = "tls", feature = "ssl"))
))]
{
let mut config = ClientConfig::new();
config
.root_store
.add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS);
Arc::new(config)
SslConnector::from(Arc::new(config))
}
#[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))]
#[cfg_attr(rustfmt, rustfmt_skip)]
#[cfg(not(any(
feature = "alpn", feature = "ssl", feature = "tls", feature = "rust-tls")))]
{
()
}
};
#[cfg_attr(feature = "cargo-clippy", allow(let_unit_value))]
ClientConnector::with_connector_impl(connector)
}
}
impl ClientConnector {
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
/// Create `ClientConnector` actor with custom `SslConnector` instance.
///
/// By default `ClientConnector` uses a very simple SSL configuration.
@@ -325,7 +338,7 @@ impl ClientConnector {
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
not(any(feature = "alpn", feature = "ssl", feature = "tls"))
))]
/// Create `ClientConnector` actor with custom `SslConnector` instance.
///
@@ -354,7 +367,7 @@ impl ClientConnector {
/// config
/// .root_store
/// .add_server_trust_anchors(&webpki_roots::TLS_SERVER_ROOTS);
/// let conn = ClientConnector::with_connector(Arc::new(config)).start();
/// let conn = ClientConnector::with_connector(config).start();
///
/// conn.send(
/// Connect::new("https://www.rust-lang.org").unwrap()) // <- connect to host
@@ -371,12 +384,12 @@ impl ClientConnector {
/// ```
pub fn with_connector(connector: ClientConfig) -> ClientConnector {
// keep level of indirection for docstrings matching featureflags
Self::with_connector_impl(Arc::new(connector))
Self::with_connector_impl(SslConnector::from(Arc::new(connector)))
}
#[cfg(all(
feature = "tls",
not(any(feature = "alpn", feature = "rust-tls"))
not(any(feature = "ssl", feature = "alpn", feature = "rust-tls"))
))]
/// Create `ClientConnector` actor with custom `SslConnector` instance.
///
@@ -490,8 +503,10 @@ impl ClientConnector {
}
/// Use custom resolver actor
pub fn resolver(mut self, addr: Addr<Resolver>) -> Self {
self.resolver = Some(addr);
///
/// By default actix's Resolver is used.
pub fn resolver<A: Into<Recipient<ResolveConnect>>>(mut self, addr: A) -> Self {
self.resolver = Some(addr.into());
self
}
@@ -714,7 +729,7 @@ impl ClientConnector {
act.release_key(&key2);
()
}).and_then(move |res, act, _| {
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
match res {
Err(err) => {
let _ = waiter.tx.send(Err(err.into()));
@@ -756,7 +771,7 @@ impl ClientConnector {
}
}
#[cfg(all(feature = "tls", not(feature = "alpn")))]
#[cfg(all(feature = "tls", not(any(feature = "alpn", feature = "ssl"))))]
match res {
Err(err) => {
let _ = waiter.tx.send(Err(err.into()));
@@ -800,7 +815,7 @@ impl ClientConnector {
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "tls"))
not(any(feature = "alpn", feature = "ssl", feature = "tls"))
))]
match res {
Err(err) => {
@@ -813,7 +828,7 @@ impl ClientConnector {
let host = DNSNameRef::try_from_ascii_str(&key.host).unwrap();
fut::Either::A(
act.connector
.connect_async(host, stream)
.connect(host, stream)
.into_actor(act)
.then(move |res, _, _| {
match res {
@@ -844,7 +859,12 @@ impl ClientConnector {
}
}
#[cfg(not(any(feature = "alpn", feature = "tls", feature = "rust-tls")))]
#[cfg(not(any(
feature = "alpn",
feature = "ssl",
feature = "tls",
feature = "rust-tls"
)))]
match res {
Err(err) => {
let _ = waiter.tx.send(Err(err.into()));
@@ -922,7 +942,7 @@ impl Handler<Connect> for ClientConnector {
}
let host = uri.host().unwrap().to_owned();
let port = uri.port().unwrap_or_else(|| proto.port());
let port = uri.port_part().map(|port| port.as_u16()).unwrap_or_else(|| proto.port());
let key = Key {
host,
port,
@@ -1260,6 +1280,11 @@ impl IoStream for Connection {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
IoStream::set_linger(&mut *self.stream, dur)
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
IoStream::set_keepalive(&mut *self.stream, dur)
}
}
impl io::Read for Connection {
@@ -1307,4 +1332,9 @@ impl<Io: IoStream> IoStream for TlsStream<Io> {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur)
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_keepalive(dur)
}
}
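The `resolver()` method above now accepts anything convertible into `Recipient<ResolveConnect>` rather than a concrete `Addr<Resolver>`. A minimal sketch of plugging in a custom resolver, assuming `MyResolver` is a hypothetical actor implementing `Handler<ResolveConnect>` (it is not part of this change set):
```rust
// Sketch only. `MyResolver` is hypothetical; `ClientConnector::default()`
// and `.start()` follow the doc examples shown above.
let resolver = MyResolver::new().start();   // Addr<MyResolver>
let conn = ClientConnector::default()
    .resolver(resolver.recipient())         // converted to Recipient<ResolveConnect>
    .start();
```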

View File

@@ -2,11 +2,12 @@
//!
//! ```rust
//! # extern crate actix_web;
//! # extern crate actix;
//! # extern crate futures;
//! # extern crate tokio;
//! # use futures::Future;
//! # use std::process;
//! use actix_web::{actix, client};
//! use actix_web::client;
//! use futures::Future;
//!
//! fn main() {
//! actix::run(
@@ -61,12 +62,13 @@ impl ResponseError for SendRequestError {
///
/// ```rust
/// # extern crate actix_web;
/// # extern crate actix;
/// # extern crate futures;
/// # extern crate tokio;
/// # extern crate env_logger;
/// # use futures::Future;
/// # use std::process;
/// use actix_web::{actix, client};
/// use actix_web::client;
/// use futures::Future;
///
/// fn main() {
/// actix::run(
@@ -103,6 +105,13 @@ pub fn post<U: AsRef<str>>(uri: U) -> ClientRequestBuilder {
builder
}
/// Create request builder for `PATCH` requests
pub fn patch<U: AsRef<str>>(uri: U) -> ClientRequestBuilder {
let mut builder = ClientRequest::build();
builder.method(Method::PATCH).uri(uri);
builder
}
/// Create request builder for `PUT` requests
pub fn put<U: AsRef<str>>(uri: U) -> ClientRequestBuilder {
let mut builder = ClientRequest::build();
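A minimal usage sketch of the new `PATCH` builder, mirroring the module doc example above; the URL is a placeholder and error handling is reduced to a log line:
```rust
extern crate actix;
extern crate actix_web;
extern crate futures;

use actix_web::client;
use futures::Future;

fn main() {
    actix::run(|| {
        client::patch("http://127.0.0.1:8080/resource") // placeholder URL
            .finish().unwrap()
            .send()
            .map_err(|err| eprintln!("request failed: {:?}", err))
            .and_then(|response| {
                println!("PATCH response: {:?}", response);
                actix::System::current().stop();
                Ok(())
            })
    });
}
```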

View File

@@ -56,7 +56,7 @@ impl HttpResponseParser {
return Ok(Async::Ready(msg));
}
Async::NotReady => {
if buf.capacity() >= MAX_BUFFER_SIZE {
if buf.len() >= MAX_BUFFER_SIZE {
return Err(HttpResponseParserError::Error(
ParseError::TooLarge,
));

View File

@@ -6,7 +6,8 @@ use std::time::{Duration, Instant};
use std::{io, mem};
use tokio_timer::Delay;
use actix::{Addr, Request, SystemService};
use actix::{Addr, SystemService};
use actix_inner::dev::Request;
use super::{
ClientConnector, ClientConnectorError, ClientRequest, ClientResponse, Connect,
@@ -87,7 +88,8 @@ impl SendRequest {
}
pub(crate) fn with_connector(
req: ClientRequest, conn: Addr<ClientConnector>,
req: ClientRequest,
conn: Addr<ClientConnector>,
) -> SendRequest {
SendRequest {
req,
@@ -362,11 +364,11 @@ impl Pipeline {
if let Some(ref mut decompress) = self.decompress {
match decompress.feed_data(b) {
Ok(Some(b)) => return Ok(Async::Ready(Some(b))),
Ok(None) => return Ok(Async::NotReady),
Ok(None) => continue,
Err(ref err)
if err.kind() == io::ErrorKind::WouldBlock =>
{
continue
continue;
}
Err(err) => return Err(err.into()),
}

View File

@@ -12,6 +12,7 @@ use serde::Serialize;
use serde_json;
use serde_urlencoded;
use url::Url;
use base64::encode;
use super::connector::{ClientConnector, Connection};
use super::pipeline::SendRequest;
@@ -27,11 +28,12 @@ use httprequest::HttpRequest;
///
/// ```rust
/// # extern crate actix_web;
/// # extern crate actix;
/// # extern crate futures;
/// # extern crate tokio;
/// # use futures::Future;
/// # use std::process;
/// use actix_web::{actix, client};
/// use actix_web::client;
///
/// fn main() {
/// actix::run(
@@ -110,6 +112,13 @@ impl ClientRequest {
builder
}
/// Create request builder for `PATCH` request
pub fn patch<U: AsRef<str>>(uri: U) -> ClientRequestBuilder {
let mut builder = ClientRequest::build();
builder.method(Method::PATCH).uri(uri);
builder
}
/// Create request builder for `PUT` request
pub fn put<U: AsRef<str>>(uri: U) -> ClientRequestBuilder {
let mut builder = ClientRequest::build();
@@ -254,16 +263,16 @@ impl ClientRequest {
impl fmt::Debug for ClientRequest {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let res = writeln!(
writeln!(
f,
"\nClientRequest {:?} {}:{}",
self.version, self.method, self.uri
);
let _ = writeln!(f, " headers:");
)?;
writeln!(f, " headers:")?;
for (key, val) in self.headers.iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val);
writeln!(f, " {:?}: {:?}", key, val)?;
}
res
Ok(())
}
}
@@ -484,6 +493,29 @@ impl ClientRequestBuilder {
self
}
/// Set HTTP basic authorization
pub fn basic_auth<U, P>(&mut self, username: U, password: Option<P>) -> &mut Self
where
U: fmt::Display,
P: fmt::Display,
{
let auth = match password {
Some(password) => format!("{}:{}", username, password),
None => format!("{}", username)
};
let header_value = format!("Basic {}", encode(&auth));
self.header(header::AUTHORIZATION, &*header_value)
}
/// Set HTTP bearer authentication
pub fn bearer_auth<T>( &mut self, token: T) -> &mut Self
where
T: fmt::Display,
{
let header_value = format!("Bearer {}", token);
self.header(header::AUTHORIZATION, &*header_value)
}
/// Set content length
#[inline]
pub fn content_length(&mut self, len: u64) -> &mut Self {
@@ -631,7 +663,7 @@ impl ClientRequestBuilder {
if !parts.headers.contains_key(header::HOST) {
let mut wrt = BytesMut::with_capacity(host.len() + 5).writer();
let _ = match parts.uri.port() {
let _ = match parts.uri.port_part().map(|port| port.as_u16()) {
None | Some(80) | Some(443) => write!(wrt, "{}", host),
Some(port) => write!(wrt, "{}:{}", host, port),
};
@@ -750,16 +782,16 @@ fn parts<'a>(
impl fmt::Debug for ClientRequestBuilder {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
if let Some(ref parts) = self.request {
let res = writeln!(
writeln!(
f,
"\nClientRequestBuilder {:?} {}:{}",
parts.version, parts.method, parts.uri
);
let _ = writeln!(f, " headers:");
)?;
writeln!(f, " headers:")?;
for (key, val) in parts.headers.iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val);
writeln!(f, " {:?}: {:?}", key, val)?;
}
res
Ok(())
} else {
write!(f, "ClientRequestBuilder(Consumed)")
}
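A short sketch of the two builder methods added above; the URL and credentials are placeholders. As the code shows, `basic_auth` base64-encodes `username:password`, while `bearer_auth` inserts the token verbatim:
```rust
// Sketch only: builds the requests without sending them.
let basic = ClientRequest::get("http://127.0.0.1:8080/protected") // placeholder URL
    .basic_auth("user", Some("pass"))  // Authorization: Basic dXNlcjpwYXNz
    .finish()
    .unwrap();

let bearer = ClientRequest::get("http://127.0.0.1:8080/protected")
    .bearer_auth("token-123")          // Authorization: Bearer token-123
    .finish()
    .unwrap();
```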

View File

@@ -95,12 +95,12 @@ impl ClientResponse {
impl fmt::Debug for ClientResponse {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let res = writeln!(f, "\nClientResponse {:?} {}", self.version(), self.status());
let _ = writeln!(f, " headers:");
writeln!(f, "\nClientResponse {:?} {}", self.version(), self.status())?;
writeln!(f, " headers:")?;
for (key, val) in self.headers().iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val);
writeln!(f, " {:?}: {:?}", key, val)?;
}
res
Ok(())
}
}

View File

@@ -1,4 +1,7 @@
#![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))]
#![cfg_attr(
feature = "cargo-clippy",
allow(redundant_field_names)
)]
use std::cell::RefCell;
use std::fmt::Write as FmtWrite;
@@ -13,9 +16,9 @@ use flate2::write::{GzEncoder, ZlibEncoder};
use flate2::Compression;
use futures::{Async, Poll};
use http::header::{
HeaderValue, CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
self, HeaderValue, CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
};
use http::{HttpTryFrom, Version};
use http::{Method, HttpTryFrom, Version};
use time::{self, Duration};
use tokio_io::AsyncWrite;
@@ -220,7 +223,19 @@ fn content_encoder(buf: BytesMut, req: &mut ClientRequest) -> Output {
let transfer = match body {
Body::Empty => {
req.headers_mut().remove(CONTENT_LENGTH);
match req.method() {
//Insert zero content-length only if user hasn't added it.
//We don't really need it for other methods as they are not supposed to carry payload
&Method::POST | &Method::PUT | &Method::PATCH => {
req.headers_mut()
.entry(CONTENT_LENGTH)
.expect("CONTENT_LENGTH to be valid header name")
.or_insert(header::HeaderValue::from_static("0"));
},
_ => {
req.headers_mut().remove(CONTENT_LENGTH);
}
}
return Output::Empty(buf);
}
Body::Binary(ref mut bytes) => {
@@ -407,3 +422,76 @@ impl CachedDate {
self.next_update.nsec = 0;
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_content_encoder_empty_body() {
let mut req = ClientRequest::post("http://google.com").finish().expect("Create request");
let result = content_encoder(BytesMut::new(), &mut req);
match result {
Output::Empty(buf) => {
assert_eq!(buf.len(), 0);
let content_len = req.headers().get(CONTENT_LENGTH).expect("To set Content-Length for empty POST");
assert_eq!(content_len, "0");
},
_ => panic!("Unexpected result, should be Output::Empty"),
}
req.set_method(Method::GET);
let result = content_encoder(BytesMut::new(), &mut req);
match result {
Output::Empty(buf) => {
assert_eq!(buf.len(), 0);
assert!(!req.headers().contains_key(CONTENT_LENGTH));
},
_ => panic!("Unexpected result, should be Output::Empty"),
}
req.set_method(Method::PUT);
let result = content_encoder(BytesMut::new(), &mut req);
match result {
Output::Empty(buf) => {
assert_eq!(buf.len(), 0);
let content_len = req.headers().get(CONTENT_LENGTH).expect("To set Content-Length for empty PUT");
assert_eq!(content_len, "0");
},
_ => panic!("Unexpected result, should be Output::Empty"),
}
req.set_method(Method::DELETE);
let result = content_encoder(BytesMut::new(), &mut req);
match result {
Output::Empty(buf) => {
assert_eq!(buf.len(), 0);
assert!(!req.headers().contains_key(CONTENT_LENGTH));
},
_ => panic!("Unexpected result, should be Output::Empty"),
}
req.set_method(Method::PATCH);
let result = content_encoder(BytesMut::new(), &mut req);
match result {
Output::Empty(buf) => {
assert_eq!(buf.len(), 0);
let content_len = req.headers().get(CONTENT_LENGTH).expect("To set Content-Length for empty PATCH");
assert_eq!(content_len, "0");
},
_ => panic!("Unexpected result, should be Output::Empty"),
}
}
}

View File

@@ -1,7 +1,10 @@
use std::rc::Rc;
use serde::de::{self, Deserializer, Error as DeError, Visitor};
use httprequest::HttpRequest;
use param::ParamsIter;
use uri::RESERVED_QUOTER;
macro_rules! unsupported_type {
($trait_fn:ident, $name:expr) => {
@@ -13,6 +16,20 @@ macro_rules! unsupported_type {
};
}
macro_rules! percent_decode_if_needed {
($value:expr, $decode:expr) => {
if $decode {
if let Some(ref mut value) = RESERVED_QUOTER.requote($value.as_bytes()) {
Rc::make_mut(value).parse()
} else {
$value.parse()
}
} else {
$value.parse()
}
}
}
macro_rules! parse_single_value {
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
@@ -23,11 +40,11 @@ macro_rules! parse_single_value {
format!("wrong number of parameters: {} expected 1",
self.req.match_info().len()).as_str()))
} else {
let v = self.req.match_info()[0].parse().map_err(
|_| de::value::Error::custom(
format!("can not parse {:?} to a {}",
&self.req.match_info()[0], $tp)))?;
visitor.$visit_fn(v)
let v_parsed = percent_decode_if_needed!(&self.req.match_info()[0], self.decode)
.map_err(|_| de::value::Error::custom(
format!("can not parse {:?} to a {}", &self.req.match_info()[0], $tp)
))?;
visitor.$visit_fn(v_parsed)
}
}
}
@@ -35,11 +52,12 @@ macro_rules! parse_single_value {
pub struct PathDeserializer<'de, S: 'de> {
req: &'de HttpRequest<S>,
decode: bool,
}
impl<'de, S: 'de> PathDeserializer<'de, S> {
pub fn new(req: &'de HttpRequest<S>) -> Self {
PathDeserializer { req }
pub fn new(req: &'de HttpRequest<S>, decode: bool) -> Self {
PathDeserializer { req, decode }
}
}
@@ -53,6 +71,7 @@ impl<'de, S: 'de> Deserializer<'de> for PathDeserializer<'de, S> {
visitor.visit_map(ParamsDeserializer {
params: self.req.match_info().iter(),
current: None,
decode: self.decode,
})
}
@@ -107,6 +126,7 @@ impl<'de, S: 'de> Deserializer<'de> for PathDeserializer<'de, S> {
} else {
visitor.visit_seq(ParamsSeq {
params: self.req.match_info().iter(),
decode: self.decode,
})
}
}
@@ -128,6 +148,7 @@ impl<'de, S: 'de> Deserializer<'de> for PathDeserializer<'de, S> {
} else {
visitor.visit_seq(ParamsSeq {
params: self.req.match_info().iter(),
decode: self.decode,
})
}
}
@@ -141,28 +162,13 @@ impl<'de, S: 'de> Deserializer<'de> for PathDeserializer<'de, S> {
Err(de::value::Error::custom("unsupported type: enum"))
}
fn deserialize_str<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
if self.req.match_info().len() != 1 {
Err(de::value::Error::custom(
format!(
"wrong number of parameters: {} expected 1",
self.req.match_info().len()
).as_str(),
))
} else {
visitor.visit_str(&self.req.match_info()[0])
}
}
fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where
V: Visitor<'de>,
{
visitor.visit_seq(ParamsSeq {
params: self.req.match_info().iter(),
decode: self.decode,
})
}
@@ -175,7 +181,7 @@ impl<'de, S: 'de> Deserializer<'de> for PathDeserializer<'de, S> {
parse_single_value!(deserialize_bool, visit_bool, "bool");
parse_single_value!(deserialize_i8, visit_i8, "i8");
parse_single_value!(deserialize_i16, visit_i16, "i16");
parse_single_value!(deserialize_i32, visit_i32, "i16");
parse_single_value!(deserialize_i32, visit_i32, "i32");
parse_single_value!(deserialize_i64, visit_i64, "i64");
parse_single_value!(deserialize_u8, visit_u8, "u8");
parse_single_value!(deserialize_u16, visit_u16, "u16");
@@ -184,13 +190,16 @@ impl<'de, S: 'de> Deserializer<'de> for PathDeserializer<'de, S> {
parse_single_value!(deserialize_f32, visit_f32, "f32");
parse_single_value!(deserialize_f64, visit_f64, "f64");
parse_single_value!(deserialize_string, visit_string, "String");
parse_single_value!(deserialize_str, visit_string, "String");
parse_single_value!(deserialize_byte_buf, visit_string, "String");
parse_single_value!(deserialize_char, visit_char, "char");
}
struct ParamsDeserializer<'de> {
params: ParamsIter<'de>,
current: Option<(&'de str, &'de str)>,
decode: bool,
}
impl<'de> de::MapAccess<'de> for ParamsDeserializer<'de> {
@@ -212,7 +221,7 @@ impl<'de> de::MapAccess<'de> for ParamsDeserializer<'de> {
V: de::DeserializeSeed<'de>,
{
if let Some((_, value)) = self.current.take() {
seed.deserialize(Value { value })
seed.deserialize(Value { value, decode: self.decode })
} else {
Err(de::value::Error::custom("unexpected item"))
}
@@ -252,16 +261,18 @@ macro_rules! parse_value {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: Visitor<'de>
{
let v = self.value.parse().map_err(
|_| de::value::Error::custom(
format!("can not parse {:?} to a {}", self.value, $tp)))?;
visitor.$visit_fn(v)
let v_parsed = percent_decode_if_needed!(&self.value, self.decode)
.map_err(|_| de::value::Error::custom(
format!("can not parse {:?} to a {}", &self.value, $tp)
))?;
visitor.$visit_fn(v_parsed)
}
}
}
struct Value<'de> {
value: &'de str,
decode: bool,
}
impl<'de> Deserializer<'de> for Value<'de> {
@@ -377,6 +388,7 @@ impl<'de> Deserializer<'de> for Value<'de> {
struct ParamsSeq<'de> {
params: ParamsIter<'de>,
decode: bool,
}
impl<'de> de::SeqAccess<'de> for ParamsSeq<'de> {
@@ -387,7 +399,7 @@ impl<'de> de::SeqAccess<'de> for ParamsSeq<'de> {
T: de::DeserializeSeed<'de>,
{
match self.params.next() {
Some(item) => Ok(Some(seed.deserialize(Value { value: item.1 })?)),
Some(item) => Ok(Some(seed.deserialize(Value { value: item.1, decode: self.decode })?)),
None => Ok(None),
}
}
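The `decode` flag threaded through the deserializer above comes from the `Path` extractor configuration. A minimal sketch of turning decoding off for one route, assuming the `PathConfig::default().disable_decoding()` API named in CHANGES.md and the tuple-style config closure described in the 0.7.4 migration note:
```rust
use actix_web::{http, App, Path};

fn index(name: Path<String>) -> String {
    format!("hello {}", name)
}

fn main() {
    let app = App::new().resource("/users/{name}", |r| {
        // `cfg` is a tuple with one config per extractor argument of `index`;
        // disabling decoding leaves reserved characters percent-encoded.
        r.method(http::Method::GET)
            .with_config(index, |cfg| { cfg.0.disable_decoding(); })
    });
}
```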

View File

@@ -5,7 +5,7 @@ use std::string::FromUtf8Error;
use std::sync::Mutex;
use std::{fmt, io, result};
use actix::MailboxError;
use actix::{MailboxError, SendError};
use cookie;
use failure::{self, Backtrace, Fail};
use futures::Canceled;
@@ -136,6 +136,10 @@ pub trait ResponseError: Fail + InternalResponseErrorAsFail {
}
}
impl<T> ResponseError for SendError<T>
where T: Send + Sync + 'static {
}
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.cause, f)
@@ -759,6 +763,16 @@ where
InternalError::new(err, StatusCode::UNAUTHORIZED).into()
}
/// Helper function that creates wrapper of any error and generate
/// *PAYMENT_REQUIRED* response.
#[allow(non_snake_case)]
pub fn ErrorPaymentRequired<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::PAYMENT_REQUIRED).into()
}
/// Helper function that creates wrapper of any error and generate *FORBIDDEN*
/// response.
#[allow(non_snake_case)]
@@ -789,6 +803,26 @@ where
InternalError::new(err, StatusCode::METHOD_NOT_ALLOWED).into()
}
/// Helper function that creates wrapper of any error and generate *NOT
/// ACCEPTABLE* response.
#[allow(non_snake_case)]
pub fn ErrorNotAcceptable<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::NOT_ACCEPTABLE).into()
}
/// Helper function that creates wrapper of any error and generate *PROXY
/// AUTHENTICATION REQUIRED* response.
#[allow(non_snake_case)]
pub fn ErrorProxyAuthenticationRequired<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::PROXY_AUTHENTICATION_REQUIRED).into()
}
/// Helper function that creates wrapper of any error and generate *REQUEST
/// TIMEOUT* response.
#[allow(non_snake_case)]
@@ -819,6 +853,16 @@ where
InternalError::new(err, StatusCode::GONE).into()
}
/// Helper function that creates wrapper of any error and generate *LENGTH
/// REQUIRED* response.
#[allow(non_snake_case)]
pub fn ErrorLengthRequired<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::LENGTH_REQUIRED).into()
}
/// Helper function that creates wrapper of any error and generate
/// *PRECONDITION FAILED* response.
#[allow(non_snake_case)]
@@ -829,6 +873,46 @@ where
InternalError::new(err, StatusCode::PRECONDITION_FAILED).into()
}
/// Helper function that creates wrapper of any error and generate
/// *PAYLOAD TOO LARGE* response.
#[allow(non_snake_case)]
pub fn ErrorPayloadTooLarge<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::PAYLOAD_TOO_LARGE).into()
}
/// Helper function that creates wrapper of any error and generate
/// *URI TOO LONG* response.
#[allow(non_snake_case)]
pub fn ErrorUriTooLong<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::URI_TOO_LONG).into()
}
/// Helper function that creates wrapper of any error and generate
/// *UNSUPPORTED MEDIA TYPE* response.
#[allow(non_snake_case)]
pub fn ErrorUnsupportedMediaType<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::UNSUPPORTED_MEDIA_TYPE).into()
}
/// Helper function that creates wrapper of any error and generate
/// *RANGE NOT SATISFIABLE* response.
#[allow(non_snake_case)]
pub fn ErrorRangeNotSatisfiable<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::RANGE_NOT_SATISFIABLE).into()
}
/// Helper function that creates wrapper of any error and generate
/// *EXPECTATION FAILED* response.
#[allow(non_snake_case)]
@@ -839,6 +923,106 @@ where
InternalError::new(err, StatusCode::EXPECTATION_FAILED).into()
}
/// Helper function that creates wrapper of any error and generate
/// *IM A TEAPOT* response.
#[allow(non_snake_case)]
pub fn ErrorImATeapot<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::IM_A_TEAPOT).into()
}
/// Helper function that creates wrapper of any error and generate
/// *MISDIRECTED REQUEST* response.
#[allow(non_snake_case)]
pub fn ErrorMisdirectedRequest<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::MISDIRECTED_REQUEST).into()
}
/// Helper function that creates wrapper of any error and generate
/// *UNPROCESSABLE ENTITY* response.
#[allow(non_snake_case)]
pub fn ErrorUnprocessableEntity<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::UNPROCESSABLE_ENTITY).into()
}
/// Helper function that creates wrapper of any error and generate
/// *LOCKED* response.
#[allow(non_snake_case)]
pub fn ErrorLocked<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::LOCKED).into()
}
/// Helper function that creates wrapper of any error and generate
/// *FAILED DEPENDENCY* response.
#[allow(non_snake_case)]
pub fn ErrorFailedDependency<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::FAILED_DEPENDENCY).into()
}
/// Helper function that creates wrapper of any error and generate
/// *UPGRADE REQUIRED* response.
#[allow(non_snake_case)]
pub fn ErrorUpgradeRequired<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::UPGRADE_REQUIRED).into()
}
/// Helper function that creates wrapper of any error and generate
/// *PRECONDITION REQUIRED* response.
#[allow(non_snake_case)]
pub fn ErrorPreconditionRequired<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::PRECONDITION_REQUIRED).into()
}
/// Helper function that creates wrapper of any error and generate
/// *TOO MANY REQUESTS* response.
#[allow(non_snake_case)]
pub fn ErrorTooManyRequests<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::TOO_MANY_REQUESTS).into()
}
/// Helper function that creates a wrapper of any error and generates
/// a *REQUEST HEADER FIELDS TOO LARGE* response.
#[allow(non_snake_case)]
pub fn ErrorRequestHeaderFieldsTooLarge<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE).into()
}
/// Helper function that creates a wrapper of any error and generates
/// an *UNAVAILABLE FOR LEGAL REASONS* response.
#[allow(non_snake_case)]
pub fn ErrorUnavailableForLegalReasons<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::UNAVAILABLE_FOR_LEGAL_REASONS).into()
}
/// Helper function that creates a wrapper of any error and
/// generates an *INTERNAL SERVER ERROR* response.
#[allow(non_snake_case)]
@@ -889,6 +1073,66 @@ where
InternalError::new(err, StatusCode::GATEWAY_TIMEOUT).into()
}
/// Helper function that creates a wrapper of any error and
/// generates an *HTTP VERSION NOT SUPPORTED* response.
#[allow(non_snake_case)]
pub fn ErrorHttpVersionNotSupported<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::HTTP_VERSION_NOT_SUPPORTED).into()
}
/// Helper function that creates a wrapper of any error and
/// generates a *VARIANT ALSO NEGOTIATES* response.
#[allow(non_snake_case)]
pub fn ErrorVariantAlsoNegotiates<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::VARIANT_ALSO_NEGOTIATES).into()
}
/// Helper function that creates a wrapper of any error and
/// generates an *INSUFFICIENT STORAGE* response.
#[allow(non_snake_case)]
pub fn ErrorInsufficientStorage<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::INSUFFICIENT_STORAGE).into()
}
/// Helper function that creates a wrapper of any error and
/// generates a *LOOP DETECTED* response.
#[allow(non_snake_case)]
pub fn ErrorLoopDetected<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::LOOP_DETECTED).into()
}
/// Helper function that creates a wrapper of any error and
/// generates a *NOT EXTENDED* response.
#[allow(non_snake_case)]
pub fn ErrorNotExtended<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::NOT_EXTENDED).into()
}
/// Helper function that creates a wrapper of any error and
/// generates a *NETWORK AUTHENTICATION REQUIRED* response.
#[allow(non_snake_case)]
pub fn ErrorNetworkAuthenticationRequired<T>(err: T) -> Error
where
T: Send + Sync + fmt::Debug + fmt::Display + 'static,
{
InternalError::new(err, StatusCode::NETWORK_AUTHENTICATION_REQUIRED).into()
}
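// Editor's sketch, not part of this diff: these helpers wrap any `Debug + Display`
// value and short-circuit a handler with the matching status code. The handler name
// and message below are illustrative only (actix-web 0.7 style handler).
fn brew_coffee() -> Result<&'static str, Error> {
    // Responds with "418 I'm a teapot"; the wrapped value is kept for Debug/Display output.
    Err(ErrorImATeapot("short and stout"))
}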
#[cfg(test)]
mod tests {
use super::*;
@@ -1068,6 +1312,9 @@ mod tests {
let r: HttpResponse = ErrorUnauthorized("err").into();
assert_eq!(r.status(), StatusCode::UNAUTHORIZED);
let r: HttpResponse = ErrorPaymentRequired("err").into();
assert_eq!(r.status(), StatusCode::PAYMENT_REQUIRED);
let r: HttpResponse = ErrorForbidden("err").into();
assert_eq!(r.status(), StatusCode::FORBIDDEN);
@@ -1077,6 +1324,12 @@ mod tests {
let r: HttpResponse = ErrorMethodNotAllowed("err").into();
assert_eq!(r.status(), StatusCode::METHOD_NOT_ALLOWED);
let r: HttpResponse = ErrorNotAcceptable("err").into();
assert_eq!(r.status(), StatusCode::NOT_ACCEPTABLE);
let r: HttpResponse = ErrorProxyAuthenticationRequired("err").into();
assert_eq!(r.status(), StatusCode::PROXY_AUTHENTICATION_REQUIRED);
let r: HttpResponse = ErrorRequestTimeout("err").into();
assert_eq!(r.status(), StatusCode::REQUEST_TIMEOUT);
@@ -1086,12 +1339,57 @@ mod tests {
let r: HttpResponse = ErrorGone("err").into();
assert_eq!(r.status(), StatusCode::GONE);
let r: HttpResponse = ErrorLengthRequired("err").into();
assert_eq!(r.status(), StatusCode::LENGTH_REQUIRED);
let r: HttpResponse = ErrorPreconditionFailed("err").into();
assert_eq!(r.status(), StatusCode::PRECONDITION_FAILED);
let r: HttpResponse = ErrorPayloadTooLarge("err").into();
assert_eq!(r.status(), StatusCode::PAYLOAD_TOO_LARGE);
let r: HttpResponse = ErrorUriTooLong("err").into();
assert_eq!(r.status(), StatusCode::URI_TOO_LONG);
let r: HttpResponse = ErrorUnsupportedMediaType("err").into();
assert_eq!(r.status(), StatusCode::UNSUPPORTED_MEDIA_TYPE);
let r: HttpResponse = ErrorRangeNotSatisfiable("err").into();
assert_eq!(r.status(), StatusCode::RANGE_NOT_SATISFIABLE);
let r: HttpResponse = ErrorExpectationFailed("err").into();
assert_eq!(r.status(), StatusCode::EXPECTATION_FAILED);
let r: HttpResponse = ErrorImATeapot("err").into();
assert_eq!(r.status(), StatusCode::IM_A_TEAPOT);
let r: HttpResponse = ErrorMisdirectedRequest("err").into();
assert_eq!(r.status(), StatusCode::MISDIRECTED_REQUEST);
let r: HttpResponse = ErrorUnprocessableEntity("err").into();
assert_eq!(r.status(), StatusCode::UNPROCESSABLE_ENTITY);
let r: HttpResponse = ErrorLocked("err").into();
assert_eq!(r.status(), StatusCode::LOCKED);
let r: HttpResponse = ErrorFailedDependency("err").into();
assert_eq!(r.status(), StatusCode::FAILED_DEPENDENCY);
let r: HttpResponse = ErrorUpgradeRequired("err").into();
assert_eq!(r.status(), StatusCode::UPGRADE_REQUIRED);
let r: HttpResponse = ErrorPreconditionRequired("err").into();
assert_eq!(r.status(), StatusCode::PRECONDITION_REQUIRED);
let r: HttpResponse = ErrorTooManyRequests("err").into();
assert_eq!(r.status(), StatusCode::TOO_MANY_REQUESTS);
let r: HttpResponse = ErrorRequestHeaderFieldsTooLarge("err").into();
assert_eq!(r.status(), StatusCode::REQUEST_HEADER_FIELDS_TOO_LARGE);
let r: HttpResponse = ErrorUnavailableForLegalReasons("err").into();
assert_eq!(r.status(), StatusCode::UNAVAILABLE_FOR_LEGAL_REASONS);
let r: HttpResponse = ErrorInternalServerError("err").into();
assert_eq!(r.status(), StatusCode::INTERNAL_SERVER_ERROR);
@@ -1106,5 +1404,23 @@ mod tests {
let r: HttpResponse = ErrorGatewayTimeout("err").into();
assert_eq!(r.status(), StatusCode::GATEWAY_TIMEOUT);
let r: HttpResponse = ErrorHttpVersionNotSupported("err").into();
assert_eq!(r.status(), StatusCode::HTTP_VERSION_NOT_SUPPORTED);
let r: HttpResponse = ErrorVariantAlsoNegotiates("err").into();
assert_eq!(r.status(), StatusCode::VARIANT_ALSO_NEGOTIATES);
let r: HttpResponse = ErrorInsufficientStorage("err").into();
assert_eq!(r.status(), StatusCode::INSUFFICIENT_STORAGE);
let r: HttpResponse = ErrorLoopDetected("err").into();
assert_eq!(r.status(), StatusCode::LOOP_DETECTED);
let r: HttpResponse = ErrorNotExtended("err").into();
assert_eq!(r.status(), StatusCode::NOT_EXTENDED);
let r: HttpResponse = ErrorNetworkAuthenticationRequired("err").into();
assert_eq!(r.status(), StatusCode::NETWORK_AUTHENTICATION_REQUIRED);
}
}


@@ -31,6 +31,7 @@ impl Hasher for IdHasher {
type AnyMap = HashMap<TypeId, Box<Any>, BuildHasherDefault<IdHasher>>;
#[derive(Default)]
/// A type map of request extensions.
pub struct Extensions {
map: AnyMap,


@@ -12,13 +12,15 @@ use serde::de::{self, DeserializeOwned};
use serde_urlencoded;
use de::PathDeserializer;
use error::{Error, ErrorBadRequest, ErrorNotFound, UrlencodedError};
use error::{Error, ErrorBadRequest, ErrorNotFound, UrlencodedError, ErrorConflict};
use handler::{AsyncResult, FromRequest};
use httpmessage::{HttpMessage, MessageBody, UrlEncoded};
use httprequest::HttpRequest;
use Either;
#[derive(PartialEq, Eq, PartialOrd, Ord)]
/// Extract typed information from the request's path.
/// Extract typed information from the request's path. Information from the path is
/// URL decoded. Decoding of special characters can be disabled through `PathConfig`.
///
/// ## Example
///
@@ -111,18 +113,73 @@ impl<T, S> FromRequest<S> for Path<T>
where
T: DeserializeOwned,
{
type Config = ();
type Config = PathConfig<S>;
type Result = Result<Self, Error>;
#[inline]
fn from_request(req: &HttpRequest<S>, _: &Self::Config) -> Self::Result {
fn from_request(req: &HttpRequest<S>, cfg: &Self::Config) -> Self::Result {
let req = req.clone();
de::Deserialize::deserialize(PathDeserializer::new(&req))
.map_err(ErrorNotFound)
let req2 = req.clone();
let err = Rc::clone(&cfg.ehandler);
de::Deserialize::deserialize(PathDeserializer::new(&req, cfg.decode))
.map_err(move |e| (*err)(e, &req2))
.map(|inner| Path { inner })
}
}
/// Path extractor configuration
///
/// ```rust
/// # extern crate actix_web;
/// use actix_web::{error, http, App, HttpResponse, Path, Result};
///
/// /// extract `(id, name)` from the request's path
/// fn index(info: Path<(u32, String)>) -> Result<String> {
/// Ok(format!("Welcome {}!", info.1))
/// }
///
/// fn main() {
/// let app = App::new().resource("/index.html/{id}/{name}", |r| {
/// r.method(http::Method::GET).with_config(index, |cfg| {
/// cfg.0.error_handler(|err, req| {
/// // <- create custom error response
/// error::InternalError::from_response(err, HttpResponse::Conflict().finish()).into()
/// });
/// })
/// });
/// }
/// ```
pub struct PathConfig<S> {
ehandler: Rc<Fn(serde_urlencoded::de::Error, &HttpRequest<S>) -> Error>,
decode: bool,
}
impl<S> PathConfig<S> {
/// Set custom error handler
pub fn error_handler<F>(&mut self, f: F) -> &mut Self
where
F: Fn(serde_urlencoded::de::Error, &HttpRequest<S>) -> Error + 'static,
{
self.ehandler = Rc::new(f);
self
}
/// Disable decoding of URL-encoded special characters in the path
pub fn disable_decoding(&mut self) -> &mut Self {
self.decode = false;
self
}
}
impl<S> Default for PathConfig<S> {
fn default() -> Self {
PathConfig {
ehandler: Rc::new(|e, _| ErrorNotFound(e)),
decode: true,
}
}
}
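// Editor's sketch, not part of this diff: disabling percent-decoding of matched path
// segments via `with_config`; assumes the usual `actix_web::{http, App, Path}` items
// are in scope, and the route/handler names are illustrative only.
fn raw_segment(value: Path<String>) -> String {
    // With decoding disabled, "%2F" stays "%2F" instead of becoming "/".
    value.to_string()
}

fn raw_segment_app() -> App {
    App::new().resource("/{value}", |r| {
        r.method(http::Method::GET).with_config(raw_segment, |cfg| {
            cfg.0.disable_decoding();
        })
    })
}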
impl<T: fmt::Debug> fmt::Debug for Path<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.inner.fmt(f)
@@ -136,7 +193,7 @@ impl<T: fmt::Display> fmt::Display for Path<T> {
}
#[derive(PartialEq, Eq, PartialOrd, Ord)]
/// Extract typed information from from the request's query.
/// Extract typed information from the request's query.
///
/// ## Example
///
@@ -200,17 +257,69 @@ impl<T, S> FromRequest<S> for Query<T>
where
T: de::DeserializeOwned,
{
type Config = ();
type Config = QueryConfig<S>;
type Result = Result<Self, Error>;
#[inline]
fn from_request(req: &HttpRequest<S>, _: &Self::Config) -> Self::Result {
fn from_request(req: &HttpRequest<S>, cfg: &Self::Config) -> Self::Result {
let req2 = req.clone();
let err = Rc::clone(&cfg.ehandler);
serde_urlencoded::from_str::<T>(req.query_string())
.map_err(|e| e.into())
.map_err(move |e| (*err)(e, &req2))
.map(Query)
}
}
/// Query extractor configuration
///
/// ```rust
/// # extern crate actix_web;
/// #[macro_use] extern crate serde_derive;
/// use actix_web::{error, http, App, HttpResponse, Query, Result};
///
/// #[derive(Deserialize)]
/// struct Info {
/// username: String,
/// }
///
/// /// deserialize `Info` from the request's query string
/// fn index(info: Query<Info>) -> Result<String> {
/// Ok(format!("Welcome {}!", info.username))
/// }
///
/// fn main() {
/// let app = App::new().resource("/index.html", |r| {
/// r.method(http::Method::GET).with_config(index, |cfg| {
/// cfg.0.error_handler(|err, req| {
/// // <- create custom error response
/// error::InternalError::from_response(err, HttpResponse::Conflict().finish()).into()
/// });
/// })
/// });
/// }
/// ```
pub struct QueryConfig<S> {
ehandler: Rc<Fn(serde_urlencoded::de::Error, &HttpRequest<S>) -> Error>,
}
impl<S> QueryConfig<S> {
/// Set custom error handler
pub fn error_handler<F>(&mut self, f: F) -> &mut Self
where
F: Fn(serde_urlencoded::de::Error, &HttpRequest<S>) -> Error + 'static,
{
self.ehandler = Rc::new(f);
self
}
}
impl<S> Default for QueryConfig<S> {
fn default() -> Self {
QueryConfig {
ehandler: Rc::new(|e, _| e.into()),
}
}
}
impl<T: fmt::Debug> fmt::Debug for Query<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(f)
@@ -526,6 +635,153 @@ where
}
}
/// Extract either one of two fields from the request.
///
/// If both or neither of the extractors succeed, the default behaviour is to prefer the
/// first successful result and, if both fail, the error of the last one to fail. The
/// behaviour can be changed by setting the appropriate `EitherCollisionStrategy`.
///
/// CAVEAT: Most of the time both extractors will be run. Make sure that the extractors you specify
/// can be run one after another (or in parallel). This will always fail for extractors that modify
/// the request state (such as the `Form` extractors that read in the body stream).
/// So Either<Form<A>, Form<B>> will not work correctly - it will only succeed if it matches the first
/// option, but will always fail to match the second (since the body stream will be at the end, and
/// appear to be empty).
///
/// ## Example
///
/// ```rust
/// # extern crate actix_web;
/// extern crate rand;
/// #[macro_use] extern crate serde_derive;
/// use actix_web::{http, App, Result, HttpRequest, Error, FromRequest};
/// use actix_web::error::ErrorBadRequest;
/// use actix_web::Either;
///
/// #[derive(Debug, Deserialize)]
/// struct Thing { name: String }
///
/// #[derive(Debug, Deserialize)]
/// struct OtherThing { id: String }
///
/// impl<S> FromRequest<S> for Thing {
/// type Config = ();
/// type Result = Result<Thing, Error>;
///
/// #[inline]
/// fn from_request(req: &HttpRequest<S>, _cfg: &Self::Config) -> Self::Result {
/// if rand::random() {
/// Ok(Thing { name: "thingy".into() })
/// } else {
/// Err(ErrorBadRequest("no luck"))
/// }
/// }
/// }
///
/// impl<S> FromRequest<S> for OtherThing {
/// type Config = ();
/// type Result = Result<OtherThing, Error>;
///
/// #[inline]
/// fn from_request(req: &HttpRequest<S>, _cfg: &Self::Config) -> Self::Result {
/// if rand::random() {
/// Ok(OtherThing { id: "otherthingy".into() })
/// } else {
/// Err(ErrorBadRequest("no luck"))
/// }
/// }
/// }
///
/// /// extract text data from request
/// fn index(supplied_thing: Either<Thing, OtherThing>) -> Result<String> {
/// match supplied_thing {
/// Either::A(thing) => Ok(format!("Got something: {:?}", thing)),
/// Either::B(other_thing) => Ok(format!("Got anotherthing: {:?}", other_thing))
/// }
/// }
///
/// fn main() {
/// let app = App::new().resource("/users/:first", |r| {
/// r.method(http::Method::POST).with(index)
/// });
/// }
/// ```
impl<A: 'static, B: 'static, S: 'static> FromRequest<S> for Either<A,B> where A: FromRequest<S>, B: FromRequest<S> {
type Config = EitherConfig<A,B,S>;
type Result = AsyncResult<Either<A,B>>;
#[inline]
fn from_request(req: &HttpRequest<S>, cfg: &Self::Config) -> Self::Result {
let a = A::from_request(&req.clone(), &cfg.a).into().map(|a| Either::A(a));
let b = B::from_request(req, &cfg.b).into().map(|b| Either::B(b));
match &cfg.collision_strategy {
EitherCollisionStrategy::PreferA => AsyncResult::future(Box::new(a.or_else(|_| b))),
EitherCollisionStrategy::PreferB => AsyncResult::future(Box::new(b.or_else(|_| a))),
EitherCollisionStrategy::FastestSuccessful => AsyncResult::future(Box::new(a.select2(b).then( |r| match r {
Ok(future::Either::A((ares, _b))) => AsyncResult::ok(ares),
Ok(future::Either::B((bres, _a))) => AsyncResult::ok(bres),
Err(future::Either::A((_aerr, b))) => AsyncResult::future(Box::new(b)),
Err(future::Either::B((_berr, a))) => AsyncResult::future(Box::new(a))
}))),
EitherCollisionStrategy::ErrorA => AsyncResult::future(Box::new(b.then(|r| match r {
Err(_berr) => AsyncResult::future(Box::new(a)),
Ok(b) => AsyncResult::future(Box::new(a.then( |r| match r {
Ok(_a) => Err(ErrorConflict("Both wings of either extractor completed")),
Err(_arr) => Ok(b)
})))
}))),
EitherCollisionStrategy::ErrorB => AsyncResult::future(Box::new(a.then(|r| match r {
Err(_aerr) => AsyncResult::future(Box::new(b)),
Ok(a) => AsyncResult::future(Box::new(b.then( |r| match r {
Ok(_b) => Err(ErrorConflict("Both wings of either extractor completed")),
Err(_berr) => Ok(a)
})))
}))),
}
}
}
/// Defines the result if neither or both of the extractors supplied to an Either<A,B> extractor succeed.
#[derive(Debug)]
pub enum EitherCollisionStrategy {
/// If both are successful, return A, if both fail, return error of B
PreferA,
/// If both are successful, return B, if both fail, return error of A
PreferB,
/// Return result of the faster, error of the slower if both fail
FastestSuccessful,
/// Return error if both succeed, return error of A if both fail
ErrorA,
/// Return error if both succeed, return error of B if both fail
ErrorB
}
impl Default for EitherCollisionStrategy {
fn default() -> Self {
EitherCollisionStrategy::FastestSuccessful
}
}
/// Determines `Either` extractor configuration
///
/// By default `EitherCollisionStrategy::FastestSuccessful` is used.
pub struct EitherConfig<A,B,S> where A: FromRequest<S>, B: FromRequest<S> {
a: A::Config,
b: B::Config,
collision_strategy: EitherCollisionStrategy
}
impl<A,B,S> Default for EitherConfig<A,B,S> where A: FromRequest<S>, B: FromRequest<S> {
fn default() -> Self {
EitherConfig {
a: A::Config::default(),
b: B::Config::default(),
collision_strategy: EitherCollisionStrategy::default()
}
}
}
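// Editor's sketch, not part of this diff: building a config with an explicit collision
// strategy. `A` and `B` stand for any two extractors; the fields are module-private, so
// user code outside this module would need a public setter to do the same.
fn prefer_a_config<A, B, S>() -> EitherConfig<A, B, S>
where
    A: FromRequest<S>,
    B: FromRequest<S>,
{
    let mut cfg = EitherConfig::default();
    // When both extractors succeed, keep A's result instead of racing them.
    cfg.collision_strategy = EitherCollisionStrategy::PreferA;
    cfg
}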
/// Optionally extract a field from the request or extract the Error if unsuccessful
///
/// If the FromRequest for T fails, inject Err into handler rather than returning an error response
@@ -766,6 +1022,11 @@ mod tests {
hello: String,
}
#[derive(Deserialize, Debug, PartialEq)]
struct OtherInfo {
bye: String,
}
#[test]
fn test_bytes() {
let cfg = PayloadConfig::default();
@@ -869,6 +1130,48 @@ mod tests {
}
}
#[test]
fn test_either() {
let req = TestRequest::default().finish();
let mut cfg: EitherConfig<Query<Info>, Query<OtherInfo>, _> = EitherConfig::default();
assert!(Either::<Query<Info>, Query<OtherInfo>>::from_request(&req, &cfg).poll().is_err());
let req = TestRequest::default().uri("/index?hello=world").finish();
match Either::<Query<Info>, Query<OtherInfo>>::from_request(&req, &cfg).poll().unwrap() {
Async::Ready(r) => assert_eq!(r, Either::A(Query(Info { hello: "world".into() }))),
_ => unreachable!(),
}
let req = TestRequest::default().uri("/index?bye=world").finish();
match Either::<Query<Info>, Query<OtherInfo>>::from_request(&req, &cfg).poll().unwrap() {
Async::Ready(r) => assert_eq!(r, Either::B(Query(OtherInfo { bye: "world".into() }))),
_ => unreachable!(),
}
let req = TestRequest::default().uri("/index?hello=world&bye=world").finish();
cfg.collision_strategy = EitherCollisionStrategy::PreferA;
match Either::<Query<Info>, Query<OtherInfo>>::from_request(&req, &cfg).poll().unwrap() {
Async::Ready(r) => assert_eq!(r, Either::A(Query(Info { hello: "world".into() }))),
_ => unreachable!(),
}
cfg.collision_strategy = EitherCollisionStrategy::PreferB;
match Either::<Query<Info>, Query<OtherInfo>>::from_request(&req, &cfg).poll().unwrap() {
Async::Ready(r) => assert_eq!(r, Either::B(Query(OtherInfo { bye: "world".into() }))),
_ => unreachable!(),
}
cfg.collision_strategy = EitherCollisionStrategy::ErrorA;
assert!(Either::<Query<Info>, Query<OtherInfo>>::from_request(&req, &cfg).poll().is_err());
cfg.collision_strategy = EitherCollisionStrategy::FastestSuccessful;
assert!(Either::<Query<Info>, Query<OtherInfo>>::from_request(&req, &cfg).poll().is_ok());
}
#[test]
fn test_result() {
let req = TestRequest::with_header(
@@ -951,15 +1254,15 @@ mod tests {
let info = router.recognize(&req, &(), 0);
let req = req.with_route_info(info);
let s = Path::<MyStruct>::from_request(&req, &()).unwrap();
let s = Path::<MyStruct>::from_request(&req, &PathConfig::default()).unwrap();
assert_eq!(s.key, "name");
assert_eq!(s.value, "user1");
let s = Path::<(String, String)>::from_request(&req, &()).unwrap();
let s = Path::<(String, String)>::from_request(&req, &PathConfig::default()).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, "user1");
let s = Query::<Id>::from_request(&req, &()).unwrap();
let s = Query::<Id>::from_request(&req, &QueryConfig::default()).unwrap();
assert_eq!(s.id, "test");
let mut router = Router::<()>::default();
@@ -968,11 +1271,11 @@ mod tests {
let info = router.recognize(&req, &(), 0);
let req = req.with_route_info(info);
let s = Path::<Test2>::from_request(&req, &()).unwrap();
let s = Path::<Test2>::from_request(&req, &PathConfig::default()).unwrap();
assert_eq!(s.as_ref().key, "name");
assert_eq!(s.value, 32);
let s = Path::<(String, u8)>::from_request(&req, &()).unwrap();
let s = Path::<(String, u8)>::from_request(&req, &PathConfig::default()).unwrap();
assert_eq!(s.0, "name");
assert_eq!(s.1, 32);
@@ -989,7 +1292,69 @@ mod tests {
let req = TestRequest::with_uri("/32/").finish();
let info = router.recognize(&req, &(), 0);
let req = req.with_route_info(info);
assert_eq!(*Path::<i8>::from_request(&req, &()).unwrap(), 32);
assert_eq!(*Path::<i8>::from_request(&req, &&PathConfig::default()).unwrap(), 32);
}
#[test]
fn test_extract_path_decode() {
let mut router = Router::<()>::default();
router.register_resource(Resource::new(ResourceDef::new("/{value}/")));
macro_rules! test_single_value {
($value:expr, $expected:expr) => {
{
let req = TestRequest::with_uri($value).finish();
let info = router.recognize(&req, &(), 0);
let req = req.with_route_info(info);
assert_eq!(*Path::<String>::from_request(&req, &PathConfig::default()).unwrap(), $expected);
}
}
}
test_single_value!("/%25/", "%");
test_single_value!("/%40%C2%A3%24%25%5E%26%2B%3D/", "@£$%^&+=");
test_single_value!("/%2B/", "+");
test_single_value!("/%252B/", "%2B");
test_single_value!("/%2F/", "/");
test_single_value!("/%252F/", "%2F");
test_single_value!("/http%3A%2F%2Flocalhost%3A80%2Ffoo/", "http://localhost:80/foo");
test_single_value!("/%2Fvar%2Flog%2Fsyslog/", "/var/log/syslog");
test_single_value!(
"/http%3A%2F%2Flocalhost%3A80%2Ffile%2F%252Fvar%252Flog%252Fsyslog/",
"http://localhost:80/file/%2Fvar%2Flog%2Fsyslog"
);
let req = TestRequest::with_uri("/%25/7/?id=test").finish();
let mut router = Router::<()>::default();
router.register_resource(Resource::new(ResourceDef::new("/{key}/{value}/")));
let info = router.recognize(&req, &(), 0);
let req = req.with_route_info(info);
let s = Path::<Test2>::from_request(&req, &PathConfig::default()).unwrap();
assert_eq!(s.key, "%");
assert_eq!(s.value, 7);
let s = Path::<(String, String)>::from_request(&req, &PathConfig::default()).unwrap();
assert_eq!(s.0, "%");
assert_eq!(s.1, "7");
}
#[test]
fn test_extract_path_no_decode() {
let mut router = Router::<()>::default();
router.register_resource(Resource::new(ResourceDef::new("/{value}/")));
let req = TestRequest::with_uri("/%25/").finish();
let info = router.recognize(&req, &(), 0);
let req = req.with_route_info(info);
assert_eq!(
*Path::<String>::from_request(
&req,
&&PathConfig::default().disable_decoding()
).unwrap(),
"%25"
);
}
#[test]

src/fs.rs

@@ -11,10 +11,10 @@ use std::{cmp, io};
#[cfg(unix)]
use std::os::unix::fs::MetadataExt;
use v_htmlescape::escape as escape_html_entity;
use bytes::Bytes;
use futures::{Async, Future, Poll, Stream};
use futures_cpupool::{CpuFuture, CpuPool};
use htmlescape::encode_minimal as escape_html_entity;
use mime;
use mime_guess::{get_mime_type, guess_mime_type};
use percent_encoding::{utf8_percent_encode, DEFAULT_ENCODE_SET};
@@ -120,6 +120,32 @@ pub struct NamedFile<C = DefaultConfig> {
}
impl NamedFile {
/// Creates an instance from a previously opened file.
///
/// The given `path` need not exist and is only used to determine the `ContentType` and
/// `ContentDisposition` headers.
///
/// # Examples
///
/// ```no_run
/// extern crate actix_web;
///
/// use actix_web::fs::NamedFile;
/// use std::io::{self, Write};
/// use std::env;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut file = File::create("foo.txt")?;
/// file.write_all(b"Hello, world!")?;
/// let named_file = NamedFile::from_file(file, "bar.txt")?;
/// Ok(())
/// }
/// ```
pub fn from_file<P: AsRef<Path>>(file: File, path: P) -> io::Result<NamedFile> {
Self::from_file_with_config(file, path, DefaultConfig)
}
/// Attempts to open a file in read-only mode.
///
/// # Examples
@@ -135,16 +161,29 @@ impl NamedFile {
}
impl<C: StaticFileConfig> NamedFile<C> {
/// Attempts to open a file in read-only mode using provided configiration.
/// Creates an instance from a previously opened file using the provided configuration.
///
/// The given `path` need not exist and is only used to determine the `ContentType` and
/// `ContentDisposition` headers.
///
/// # Examples
///
/// ```rust
/// use actix_web::fs::{DefaultConfig, NamedFile};
/// ```no_run
/// extern crate actix_web;
///
/// let file = NamedFile::open_with_config("foo.txt", DefaultConfig);
/// use actix_web::fs::{DefaultConfig, NamedFile};
/// use std::io::{self, Write};
/// use std::env;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut file = File::create("foo.txt")?;
/// file.write_all(b"Hello, world!")?;
/// let named_file = NamedFile::from_file_with_config(file, "bar.txt", DefaultConfig)?;
/// Ok(())
/// }
/// ```
pub fn open_with_config<P: AsRef<Path>>(path: P, _: C) -> io::Result<NamedFile<C>> {
pub fn from_file_with_config<P: AsRef<Path>>(file: File, path: P, _: C) -> io::Result<NamedFile<C>> {
let path = path.as_ref().to_path_buf();
// Get the name of the file and use it to construct default Content-Type
@@ -169,7 +208,6 @@ impl<C: StaticFileConfig> NamedFile<C> {
(ct, cd)
};
let file = File::open(&path)?;
let md = file.metadata()?;
let modified = md.modified().ok();
let cpu_pool = None;
@@ -188,6 +226,19 @@ impl<C: StaticFileConfig> NamedFile<C> {
})
}
/// Attempts to open a file in read-only mode using provided configuration.
///
/// # Examples
///
/// ```rust
/// use actix_web::fs::{DefaultConfig, NamedFile};
///
/// let file = NamedFile::open_with_config("foo.txt", DefaultConfig);
/// ```
pub fn open_with_config<P: AsRef<Path>>(path: P, config: C) -> io::Result<NamedFile<C>> {
Self::from_file_with_config(File::open(&path)?, path, config)
}
/// Returns reference to the underlying `File` object.
#[inline]
pub fn file(&self) -> &File {
@@ -390,6 +441,8 @@ impl<C: StaticFileConfig> Responder for NamedFile<C> {
// check last modified
let not_modified = if !none_match(etag.as_ref(), req) {
true
} else if req.headers().contains_key(header::IF_NONE_MATCH) {
false
} else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) =
(last_modified, req.get_header())
{
@@ -472,6 +525,7 @@ impl<C: StaticFileConfig> Responder for NamedFile<C> {
}
}
#[doc(hidden)]
/// A helper created from a `std::fs::File` which reads the file
/// chunk-by-chunk on a `CpuPool`.
pub struct ChunkedReadFile {
@@ -561,8 +615,23 @@ impl Directory {
}
}
// show file url as relative to static path
macro_rules! encode_file_url {
($path:ident) => {
utf8_percent_encode(&$path.to_string_lossy(), DEFAULT_ENCODE_SET)
};
}
// " -- &quot; & -- &amp; ' -- &#x27; < -- &lt; > -- &gt; / -- &#x2f;
macro_rules! encode_file_name {
($entry:ident) => {
escape_html_entity(&$entry.file_name().to_string_lossy())
};
}
fn directory_listing<S>(
dir: &Directory, req: &HttpRequest<S>,
dir: &Directory,
req: &HttpRequest<S>,
) -> Result<HttpResponse, io::Error> {
let index_of = format!("Index of {}", req.path());
let mut body = String::new();
@@ -575,11 +644,6 @@ fn directory_listing<S>(
Ok(p) => base.join(p),
Err(_) => continue,
};
// show file url as relative to static path
let file_url = utf8_percent_encode(&p.to_string_lossy(), DEFAULT_ENCODE_SET)
.to_string();
// " -- &quot; & -- &amp; ' -- &#x27; < -- &lt; > -- &gt;
let file_name = escape_html_entity(&entry.file_name().to_string_lossy());
// if file is a directory, add '/' to the end of the name
if let Ok(metadata) = entry.metadata() {
@@ -587,13 +651,15 @@ fn directory_listing<S>(
let _ = write!(
body,
"<li><a href=\"{}\">{}/</a></li>",
file_url, file_name
encode_file_url!(p),
encode_file_name!(entry),
);
} else {
let _ = write!(
body,
"<li><a href=\"{}\">{}</a></li>",
file_url, file_name
encode_file_url!(p),
encode_file_name!(entry),
);
}
} else {
@@ -656,7 +722,8 @@ impl<S: 'static> StaticFiles<S> {
/// Create new `StaticFiles` instance for specified base directory and
/// `CpuPool`.
pub fn with_pool<T: Into<PathBuf>>(
dir: T, pool: CpuPool,
dir: T,
pool: CpuPool,
) -> Result<StaticFiles<S>, Error> {
Self::with_config_pool(dir, pool, DefaultConfig)
}
@@ -667,7 +734,8 @@ impl<S: 'static, C: StaticFileConfig> StaticFiles<S, C> {
///
/// Identical to `new` but allows specifying the configuration to use.
pub fn with_config<T: Into<PathBuf>>(
dir: T, config: C,
dir: T,
config: C,
) -> Result<StaticFiles<S, C>, Error> {
// use default CpuPool
let pool = { DEFAULT_CPUPOOL.lock().clone() };
@@ -678,7 +746,9 @@ impl<S: 'static, C: StaticFileConfig> StaticFiles<S, C> {
/// Create new `StaticFiles` instance for specified base directory with config and
/// `CpuPool`.
pub fn with_config_pool<T: Into<PathBuf>>(
dir: T, pool: CpuPool, _: C,
dir: T,
pool: CpuPool,
_: C,
) -> Result<StaticFiles<S, C>, Error> {
let dir = dir.into().canonicalize()?;
@@ -722,7 +792,7 @@ impl<S: 'static, C: StaticFileConfig> StaticFiles<S, C> {
/// Set index file
///
/// Redirects to specific index file for directory "/" instead of
/// Shows specific index file for directory "/" instead of
/// showing files listing.
pub fn index_file<T: Into<String>>(mut self, index: T) -> StaticFiles<S, C> {
self.index = Some(index.into());
@@ -736,9 +806,10 @@ impl<S: 'static, C: StaticFileConfig> StaticFiles<S, C> {
}
fn try_handle(
&self, req: &HttpRequest<S>,
&self,
req: &HttpRequest<S>,
) -> Result<AsyncResult<HttpResponse>, Error> {
let tail: String = req.match_info().query("tail")?;
let tail: String = req.match_info().get_decoded("tail").unwrap_or_else(|| "".to_string());
let relpath = PathBuf::from_param(tail.trim_left_matches('/'))?;
// full filepath
@@ -746,17 +817,11 @@ impl<S: 'static, C: StaticFileConfig> StaticFiles<S, C> {
if path.is_dir() {
if let Some(ref redir_index) = self.index {
// TODO: Don't redirect, just return the index content.
// TODO: It'd be nice if there were a good usable URL manipulation
// library
let mut new_path: String = req.path().to_owned();
if !new_path.ends_with('/') {
new_path.push('/');
}
new_path.push_str(redir_index);
HttpResponse::Found()
.header(header::LOCATION, new_path.as_str())
.finish()
let path = path.join(redir_index);
NamedFile::open_with_config(path, C::default())?
.set_cpu_pool(self.cpu_pool.clone())
.respond_to(&req)?
.respond_to(&req)
} else if self.show_index {
let dir = Directory::new(self.directory.clone(), path);
@@ -881,6 +946,8 @@ impl HttpRange {
#[cfg(test)]
mod tests {
use std::fs;
use std::time::Duration;
use std::ops::Add;
use super::*;
use application::App;
@@ -900,6 +967,43 @@ mod tests {
assert_eq!(m, mime::APPLICATION_OCTET_STREAM);
}
#[test]
fn test_if_modified_since_without_if_none_match() {
let mut file = NamedFile::open("Cargo.toml")
.unwrap()
.set_cpu_pool(CpuPool::new(1));
let since = header::HttpDate::from(
SystemTime::now().add(Duration::from_secs(60)));
let req = TestRequest::default()
.header(header::IF_MODIFIED_SINCE, since)
.finish();
let resp = file.respond_to(&req).unwrap();
assert_eq!(
resp.status(),
StatusCode::NOT_MODIFIED
);
}
#[test]
fn test_if_modified_since_with_if_none_match() {
let mut file = NamedFile::open("Cargo.toml")
.unwrap()
.set_cpu_pool(CpuPool::new(1));
let since = header::HttpDate::from(
SystemTime::now().add(Duration::from_secs(60)));
let req = TestRequest::default()
.header(header::IF_NONE_MATCH, "miss_etag")
.header(header::IF_MODIFIED_SINCE, since)
.finish();
let resp = file.respond_to(&req).unwrap();
assert_ne!(
resp.status(),
StatusCode::NOT_MODIFIED
);
}
#[test]
fn test_named_file_text() {
assert!(NamedFile::open("test--").is_err());
@@ -1280,6 +1384,27 @@ mod tests {
assert_eq!(bytes, data);
}
#[test]
fn test_static_files_with_spaces() {
let mut srv = test::TestServer::with_factory(|| {
App::new().handler(
"/",
StaticFiles::new(".").unwrap().index_file("Cargo.toml"),
)
});
let request = srv
.get()
.uri(srv.url("/tests/test%20space.binary"))
.finish()
.unwrap();
let response = srv.execute(request.send()).unwrap();
assert_eq!(response.status(), StatusCode::OK);
let bytes = srv.execute(response.body()).unwrap();
let data = Bytes::from(fs::read("tests/test space.binary").unwrap());
assert_eq!(bytes, data);
}
#[derive(Default)]
pub struct OnlyMethodHeadConfig;
impl StaticFileConfig for OnlyMethodHeadConfig {
@@ -1392,43 +1517,66 @@ mod tests {
}
#[test]
fn test_redirect_to_index() {
let st = StaticFiles::new(".").unwrap().index_file("index.html");
fn test_serve_index() {
let st = StaticFiles::new(".").unwrap().index_file("test.binary");
let req = TestRequest::default().uri("/tests").finish();
let resp = st.handle(&req).respond_to(&req).unwrap();
let resp = resp.as_msg();
assert_eq!(resp.status(), StatusCode::FOUND);
assert_eq!(resp.status(), StatusCode::OK);
assert_eq!(
resp.headers().get(header::LOCATION).unwrap(),
"/tests/index.html"
resp.headers().get(header::CONTENT_TYPE).expect("content type"),
"application/octet-stream"
);
assert_eq!(
resp.headers().get(header::CONTENT_DISPOSITION).expect("content disposition"),
"attachment; filename=\"test.binary\""
);
let req = TestRequest::default().uri("/tests/").finish();
let resp = st.handle(&req).respond_to(&req).unwrap();
let resp = resp.as_msg();
assert_eq!(resp.status(), StatusCode::FOUND);
assert_eq!(resp.status(), StatusCode::OK);
assert_eq!(
resp.headers().get(header::LOCATION).unwrap(),
"/tests/index.html"
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"application/octet-stream"
);
assert_eq!(
resp.headers().get(header::CONTENT_DISPOSITION).unwrap(),
"attachment; filename=\"test.binary\""
);
// nonexistent index file
let req = TestRequest::default().uri("/tests/unknown").finish();
let resp = st.handle(&req).respond_to(&req).unwrap();
let resp = resp.as_msg();
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
let req = TestRequest::default().uri("/tests/unknown/").finish();
let resp = st.handle(&req).respond_to(&req).unwrap();
let resp = resp.as_msg();
assert_eq!(resp.status(), StatusCode::NOT_FOUND);
}
#[test]
fn test_redirect_to_index_nested() {
fn test_serve_index_nested() {
let st = StaticFiles::new(".").unwrap().index_file("mod.rs");
let req = TestRequest::default().uri("/src/client").finish();
let resp = st.handle(&req).respond_to(&req).unwrap();
let resp = resp.as_msg();
assert_eq!(resp.status(), StatusCode::FOUND);
assert_eq!(resp.status(), StatusCode::OK);
assert_eq!(
resp.headers().get(header::LOCATION).unwrap(),
"/src/client/mod.rs"
resp.headers().get(header::CONTENT_TYPE).unwrap(),
"text/x-rust"
);
assert_eq!(
resp.headers().get(header::CONTENT_DISPOSITION).unwrap(),
"inline; filename=\"mod.rs\""
);
}
#[test]
fn integration_redirect_to_index_with_prefix() {
fn integration_serve_index_with_prefix() {
let mut srv = test::TestServer::with_factory(|| {
App::new()
.prefix("public")
@@ -1437,29 +1585,21 @@ mod tests {
let request = srv.get().uri(srv.url("/public")).finish().unwrap();
let response = srv.execute(request.send()).unwrap();
assert_eq!(response.status(), StatusCode::FOUND);
let loc = response
.headers()
.get(header::LOCATION)
.unwrap()
.to_str()
.unwrap();
assert_eq!(loc, "/public/Cargo.toml");
assert_eq!(response.status(), StatusCode::OK);
let bytes = srv.execute(response.body()).unwrap();
let data = Bytes::from(fs::read("Cargo.toml").unwrap());
assert_eq!(bytes, data);
let request = srv.get().uri(srv.url("/public/")).finish().unwrap();
let response = srv.execute(request.send()).unwrap();
assert_eq!(response.status(), StatusCode::FOUND);
let loc = response
.headers()
.get(header::LOCATION)
.unwrap()
.to_str()
.unwrap();
assert_eq!(loc, "/public/Cargo.toml");
assert_eq!(response.status(), StatusCode::OK);
let bytes = srv.execute(response.body()).unwrap();
let data = Bytes::from(fs::read("Cargo.toml").unwrap());
assert_eq!(bytes, data);
}
#[test]
fn integration_redirect_to_index() {
fn integration_serve_index() {
let mut srv = test::TestServer::with_factory(|| {
App::new().handler(
"test",
@@ -1469,25 +1609,26 @@ mod tests {
let request = srv.get().uri(srv.url("/test")).finish().unwrap();
let response = srv.execute(request.send()).unwrap();
assert_eq!(response.status(), StatusCode::FOUND);
let loc = response
.headers()
.get(header::LOCATION)
.unwrap()
.to_str()
.unwrap();
assert_eq!(loc, "/test/Cargo.toml");
assert_eq!(response.status(), StatusCode::OK);
let bytes = srv.execute(response.body()).unwrap();
let data = Bytes::from(fs::read("Cargo.toml").unwrap());
assert_eq!(bytes, data);
let request = srv.get().uri(srv.url("/test/")).finish().unwrap();
let response = srv.execute(request.send()).unwrap();
assert_eq!(response.status(), StatusCode::FOUND);
let loc = response
.headers()
.get(header::LOCATION)
.unwrap()
.to_str()
.unwrap();
assert_eq!(loc, "/test/Cargo.toml");
assert_eq!(response.status(), StatusCode::OK);
let bytes = srv.execute(response.body()).unwrap();
let data = Bytes::from(fs::read("Cargo.toml").unwrap());
assert_eq!(bytes, data);
// nonexistent index file
let request = srv.get().uri(srv.url("/test/unknown")).finish().unwrap();
let response = srv.execute(request.send()).unwrap();
assert_eq!(response.status(), StatusCode::NOT_FOUND);
let request = srv.get().uri(srv.url("/test/unknown/")).finish().unwrap();
let response = srv.execute(request.send()).unwrap();
assert_eq!(response.status(), StatusCode::NOT_FOUND);
}
#[test]


@@ -86,7 +86,7 @@ pub trait FromRequest<S>: Sized {
/// # fn is_a_variant() -> bool { true }
/// # fn main() {}
/// ```
#[derive(Debug)]
#[derive(Debug, PartialEq)]
pub enum Either<A, B> {
/// First branch of the type
A(A),
@@ -250,7 +250,7 @@ pub(crate) enum AsyncResultItem<I, E> {
impl<I, E> AsyncResult<I, E> {
/// Create async response
#[inline]
pub fn async(fut: Box<Future<Item = I, Error = E>>) -> AsyncResult<I, E> {
pub fn future(fut: Box<Future<Item = I, Error = E>>) -> AsyncResult<I, E> {
AsyncResult(Some(AsyncResultItem::Future(fut)))
}
@@ -401,7 +401,7 @@ where
},
Err(e) => err(e),
});
Ok(AsyncResult::async(Box::new(fut)))
Ok(AsyncResult::future(Box::new(fut)))
}
}
@@ -502,7 +502,7 @@ where
Err(e) => Either::A(err(e)),
}
});
AsyncResult::async(Box::new(fut))
AsyncResult::future(Box::new(fut))
}
}
@@ -530,8 +530,7 @@ where
/// }
///
/// /// extract path info using serde
/// fn index(data: (State<MyApp>, Path<Info>)) -> String {
/// let (state, path) = data;
/// fn index(state: State<MyApp>, path: Path<Info>) -> String {
/// format!("{} {}!", state.msg, path.username)
/// }
///


@@ -200,7 +200,7 @@ pub trait HttpMessage: Sized {
/// # fn main() {}
/// ```
fn json<T: DeserializeOwned>(&self) -> JsonBody<Self, T> {
JsonBody::new(self)
JsonBody::new::<()>(self, None)
}
/// Return stream to http payload processes as multipart.
@@ -213,9 +213,10 @@ pub trait HttpMessage: Sized {
/// # extern crate actix_web;
/// # extern crate env_logger;
/// # extern crate futures;
/// # extern crate actix;
/// # use std::str;
/// # use actix_web::*;
/// # use actix_web::actix::fut::FinishStream;
/// # use actix::FinishStream;
/// # use futures::{Future, Stream};
/// # use futures::future::{ok, result, Either};
/// fn index(mut req: HttpRequest) -> Box<Future<Item = HttpResponse, Error = Error>> {


@@ -216,7 +216,7 @@ impl<S> HttpRequest<S> {
self.url_for(name, &NO_PARAMS)
}
/// This method returns reference to current `RouteInfo` object.
/// This method returns reference to current `ResourceInfo` object.
#[inline]
pub fn resource(&self) -> &ResourceInfo {
&self.resource
@@ -354,24 +354,24 @@ impl<S> FromRequest<S> for HttpRequest<S> {
impl<S> fmt::Debug for HttpRequest<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let res = writeln!(
writeln!(
f,
"\nHttpRequest {:?} {}:{}",
self.version(),
self.method(),
self.path()
);
)?;
if !self.query_string().is_empty() {
let _ = writeln!(f, " query: ?{:?}", self.query_string());
writeln!(f, " query: ?{:?}", self.query_string())?;
}
if !self.match_info().is_empty() {
let _ = writeln!(f, " params: {:?}", self.match_info());
writeln!(f, " params: {:?}", self.match_info())?;
}
let _ = writeln!(f, " headers:");
writeln!(f, " headers:")?;
for (key, val) in self.headers().iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val);
writeln!(f, " {:?}: {:?}", key, val)?;
}
res
Ok(())
}
}


@@ -48,10 +48,10 @@ impl HttpResponse {
self.0.as_mut()
}
/// Create http response builder with specific status.
/// Create a new HTTP response builder with a specific status.
#[inline]
pub fn build(status: StatusCode) -> HttpResponseBuilder {
HttpResponsePool::get(status)
HttpResponseBuilder::new(status)
}
/// Create http response builder
@@ -246,7 +246,7 @@ impl HttpResponse {
self
}
/// Get body os this response
/// Get body of this response
#[inline]
pub fn body(&self) -> &Body {
&self.get_ref().body
@@ -272,7 +272,7 @@ impl HttpResponse {
self.get_mut().response_size = size;
}
/// Set write buffer capacity
/// Get write buffer capacity
pub fn write_buffer_capacity(&self) -> usize {
self.get_ref().write_capacity
}
@@ -346,6 +346,12 @@ pub struct HttpResponseBuilder {
}
impl HttpResponseBuilder {
/// Create a new HTTP response builder with a specific status.
#[inline]
pub fn new(status: StatusCode) -> HttpResponseBuilder {
HttpResponsePool::get(status)
}
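// Editor's sketch, not part of this diff: the builder can now be obtained without
// going through `HttpResponse::build`, e.g.
//
//     let resp = HttpResponseBuilder::new(StatusCode::BAD_REQUEST).finish();
//     assert_eq!(resp.status(), StatusCode::BAD_REQUEST);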
/// Set HTTP status code of this response.
#[inline]
pub fn status(&mut self, status: StatusCode) -> &mut Self {
@@ -366,7 +372,7 @@ impl HttpResponseBuilder {
self
}
/// Set a header.
/// Append a header.
///
/// ```rust
/// # extern crate actix_web;
@@ -394,7 +400,7 @@ impl HttpResponseBuilder {
self
}
/// Set a header.
/// Append a header.
///
/// ```rust
/// # extern crate actix_web;
@@ -426,6 +432,65 @@ impl HttpResponseBuilder {
}
self
}
/// Set or replace a header with a single value.
///
/// ```rust
/// # extern crate actix_web;
/// use actix_web::{http, HttpRequest, HttpResponse};
///
/// fn index(req: HttpRequest) -> HttpResponse {
/// HttpResponse::Ok()
/// .insert("X-TEST", "value")
/// .insert(http::header::CONTENT_TYPE, "application/json")
/// .finish()
/// }
/// fn main() {}
/// ```
pub fn insert<K, V>(&mut self, key: K, value: V) -> &mut Self
where
HeaderName: HttpTryFrom<K>,
V: IntoHeaderValue,
{
if let Some(parts) = parts(&mut self.response, &self.err) {
match HeaderName::try_from(key) {
Ok(key) => match value.try_into() {
Ok(value) => {
parts.headers.insert(key, value);
}
Err(e) => self.err = Some(e.into()),
},
Err(e) => self.err = Some(e.into()),
};
}
self
}
/// Remove all instances of a header already set on this `HttpResponseBuilder`.
///
/// ```rust
/// # extern crate actix_web;
/// use actix_web::{http, HttpRequest, HttpResponse};
///
/// fn index(req: HttpRequest) -> HttpResponse {
/// HttpResponse::Ok()
/// .header(http::header::CONTENT_TYPE, "nevermind") // won't be used
/// .remove(http::header::CONTENT_TYPE)
/// .finish()
/// }
/// ```
pub fn remove<K>(&mut self, key: K) -> &mut Self
where HeaderName: HttpTryFrom<K>
{
if let Some(parts) = parts(&mut self.response, &self.err) {
match HeaderName::try_from(key) {
Ok(key) => {
parts.headers.remove(key);
},
Err(e) => self.err = Some(e.into()),
};
}
self
}
/// Set the custom reason for the response.
#[inline]
@@ -1118,6 +1183,14 @@ mod tests {
assert_eq!((v.name(), v.value()), ("cookie3", "val300"));
}
#[test]
fn test_builder_new() {
let resp = HttpResponseBuilder::new(StatusCode::BAD_REQUEST)
.finish();
assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
}
#[test]
fn test_basic_builder() {
let resp = HttpResponse::Ok()
@@ -1128,6 +1201,40 @@ mod tests {
assert_eq!(resp.status(), StatusCode::OK);
}
#[test]
fn test_insert() {
let resp = HttpResponse::Ok()
.insert("deleteme", "old value")
.insert("deleteme", "new value")
.finish();
assert_eq!("new value", resp.headers().get("deleteme").expect("new value"));
}
#[test]
fn test_remove() {
let resp = HttpResponse::Ok()
.header("deleteme", "value")
.remove("deleteme")
.finish();
assert!(resp.headers().get("deleteme").is_none())
}
#[test]
fn test_remove_replace() {
let resp = HttpResponse::Ok()
.header("some-header", "old_value1")
.header("some-header", "old_value2")
.remove("some-header")
.header("some-header", "new_value1")
.header("some-header", "new_value2")
.remove("unrelated-header")
.finish();
let mut v = resp.headers().get_all("some-header").into_iter();
assert_eq!("new_value1", v.next().unwrap());
assert_eq!("new_value2", v.next().unwrap());
assert_eq!(None, v.next());
}
#[test]
fn test_upgrade() {
let resp = HttpResponse::build(StatusCode::OK).upgrade().finish();


@@ -16,7 +16,10 @@ pub struct ConnectionInfo {
impl ConnectionInfo {
/// Create *ConnectionInfo* instance for a request.
#[cfg_attr(feature = "cargo-clippy", allow(cyclomatic_complexity))]
#[cfg_attr(
feature = "cargo-clippy",
allow(cyclomatic_complexity)
)]
pub fn update(&mut self, req: &Request) {
let mut host = None;
let mut scheme = None;


@@ -143,7 +143,7 @@ where
let req2 = req.clone();
let err = Rc::clone(&cfg.ehandler);
Box::new(
JsonBody::new(req)
JsonBody::new(req, Some(cfg))
.limit(cfg.limit)
.map_err(move |e| (*err)(e, &req2))
.map(Json),
@@ -155,6 +155,7 @@ where
///
/// ```rust
/// # extern crate actix_web;
/// extern crate mime;
/// #[macro_use] extern crate serde_derive;
/// use actix_web::{error, http, App, HttpResponse, Json, Result};
///
@@ -173,6 +174,9 @@ where
/// r.method(http::Method::POST)
/// .with_config(index, |cfg| {
/// cfg.0.limit(4096) // <- change json extractor configuration
/// .content_type(|mime| { // <- accept text/plain content type
/// mime.type_() == mime::TEXT && mime.subtype() == mime::PLAIN
/// })
/// .error_handler(|err, req| { // <- create custom error response
/// error::InternalError::from_response(
/// err, HttpResponse::Conflict().finish()).into()
@@ -184,6 +188,7 @@ where
pub struct JsonConfig<S> {
limit: usize,
ehandler: Rc<Fn(JsonPayloadError, &HttpRequest<S>) -> Error>,
content_type: Option<Box<Fn(mime::Mime) -> bool>>,
}
impl<S> JsonConfig<S> {
@@ -201,6 +206,15 @@ impl<S> JsonConfig<S> {
self.ehandler = Rc::new(f);
self
}
/// Set predicate for allowed content types
pub fn content_type<F>(&mut self, predicate: F) -> &mut Self
where
F: Fn(mime::Mime) -> bool + 'static,
{
self.content_type = Some(Box::new(predicate));
self
}
}
impl<S> Default for JsonConfig<S> {
@@ -208,6 +222,7 @@ impl<S> Default for JsonConfig<S> {
JsonConfig {
limit: 262_144,
ehandler: Rc::new(|e, _| e.into()),
content_type: None,
}
}
}
@@ -217,6 +232,7 @@ impl<S> Default for JsonConfig<S> {
/// Returns error:
///
/// * content type is not `application/json`
/// (unless specified in [`JsonConfig`](struct.JsonConfig.html))
/// * content length is greater than 256k
///
/// # Server example
@@ -253,10 +269,13 @@ pub struct JsonBody<T: HttpMessage, U: DeserializeOwned> {
impl<T: HttpMessage, U: DeserializeOwned> JsonBody<T, U> {
/// Create `JsonBody` for request.
pub fn new(req: &T) -> Self {
pub fn new<S>(req: &T, cfg: Option<&JsonConfig<S>>) -> Self {
// check content-type
let json = if let Ok(Some(mime)) = req.mime_type() {
mime.subtype() == mime::JSON || mime.suffix() == Some(mime::JSON)
mime.subtype() == mime::JSON || mime.suffix() == Some(mime::JSON) ||
cfg.map_or(false, |cfg| {
cfg.content_type.as_ref().map_or(false, |predicate| predicate(mime))
})
} else {
false
};
@@ -440,4 +459,61 @@ mod tests {
.finish();
assert!(handler.handle(&req).as_err().is_none())
}
#[test]
fn test_with_json_and_bad_content_type() {
let mut cfg = JsonConfig::default();
cfg.limit(4096);
let handler = With::new(|data: Json<MyObject>| data, cfg);
let req = TestRequest::with_header(
header::CONTENT_TYPE,
header::HeaderValue::from_static("text/plain"),
).header(
header::CONTENT_LENGTH,
header::HeaderValue::from_static("16"),
).set_payload(Bytes::from_static(b"{\"name\": \"test\"}"))
.finish();
assert!(handler.handle(&req).as_err().is_some())
}
#[test]
fn test_with_json_and_good_custom_content_type() {
let mut cfg = JsonConfig::default();
cfg.limit(4096);
cfg.content_type(|mime: mime::Mime| {
mime.type_() == mime::TEXT && mime.subtype() == mime::PLAIN
});
let handler = With::new(|data: Json<MyObject>| data, cfg);
let req = TestRequest::with_header(
header::CONTENT_TYPE,
header::HeaderValue::from_static("text/plain"),
).header(
header::CONTENT_LENGTH,
header::HeaderValue::from_static("16"),
).set_payload(Bytes::from_static(b"{\"name\": \"test\"}"))
.finish();
assert!(handler.handle(&req).as_err().is_none())
}
#[test]
fn test_with_json_and_bad_custom_content_type() {
let mut cfg = JsonConfig::default();
cfg.limit(4096);
cfg.content_type(|mime: mime::Mime| {
mime.type_() == mime::TEXT && mime.subtype() == mime::PLAIN
});
let handler = With::new(|data: Json<MyObject>| data, cfg);
let req = TestRequest::with_header(
header::CONTENT_TYPE,
header::HeaderValue::from_static("text/html"),
).header(
header::CONTENT_LENGTH,
header::HeaderValue::from_static("16"),
).set_payload(Bytes::from_static(b"{\"name\": \"test\"}"))
.finish();
assert!(handler.handle(&req).as_err().is_some())
}
}


@@ -64,8 +64,8 @@
//! ## Package feature
//!
//! * `tls` - enables ssl support via `native-tls` crate
//! * `alpn` - enables ssl support via `openssl` crate, require for `http/2`
//! support
//! * `ssl` - enables ssl support via `openssl` crate, supports `http/2`
//! * `rust-tls` - enables ssl support via `rustls` crate, supports `http/2`
//! * `uds` - enables support for making client requests via Unix Domain Sockets.
//! Unix only. Not necessary for *serving* requests.
//! * `session` - enables session support, includes `ring` crate as
@@ -80,11 +80,8 @@
#![cfg_attr(actix_nightly, feature(
specialization, // for impl ErrorResponse for std::error::Error
extern_prelude,
tool_lints,
))]
#![cfg_attr(
feature = "cargo-clippy",
allow(decimal_literal_representation, suspicious_arithmetic_impl)
)]
#![warn(missing_docs)]
#[macro_use]
@@ -105,7 +102,6 @@ extern crate lazy_static;
extern crate futures;
extern crate cookie;
extern crate futures_cpupool;
extern crate htmlescape;
extern crate http as modhttp;
extern crate httparse;
extern crate language_tags;
@@ -140,6 +136,9 @@ extern crate serde_urlencoded;
extern crate percent_encoding;
extern crate serde_json;
extern crate smallvec;
extern crate v_htmlescape;
extern crate actix_net;
#[macro_use]
extern crate actix as actix_inner;
@@ -218,14 +217,12 @@ pub use server::Request;
pub mod actix {
//! Re-exports [actix's](https://docs.rs/actix/) prelude
extern crate actix;
pub use self::actix::actors::resolver;
pub use self::actix::actors::signal;
pub use self::actix::fut;
pub use self::actix::msgs;
pub use self::actix::prelude::*;
pub use self::actix::{run, spawn};
pub use super::actix_inner::actors::resolver;
pub use super::actix_inner::actors::signal;
pub use super::actix_inner::fut;
pub use super::actix_inner::msgs;
pub use super::actix_inner::prelude::*;
pub use super::actix_inner::{run, spawn};
}
#[cfg(feature = "openssl")]
@@ -256,7 +253,7 @@ pub mod dev {
pub use body::BodyStream;
pub use context::Drain;
pub use extractor::{FormConfig, PayloadConfig};
pub use extractor::{FormConfig, PayloadConfig, QueryConfig, PathConfig, EitherConfig, EitherCollisionStrategy};
pub use handler::{AsyncResult, Handler};
pub use httpmessage::{MessageBody, Readlines, UrlEncoded};
pub use httpresponse::HttpResponseBuilder;


@@ -307,6 +307,32 @@ impl Cors {
}
}
fn access_control_allow_origin(&self, req: &Request) -> Option<HeaderValue> {
match self.inner.origins {
AllOrSome::All => {
if self.inner.send_wildcard {
Some(HeaderValue::from_static("*"))
} else if let Some(origin) = req.headers().get(header::ORIGIN) {
Some(origin.clone())
} else {
None
}
}
AllOrSome::Some(ref origins) => {
if let Some(origin) = req.headers().get(header::ORIGIN).filter(|o| {
match o.to_str() {
Ok(os) => origins.contains(os),
_ => false
}
}) {
Some(origin.clone())
} else {
Some(self.inner.origins_str.as_ref().unwrap().clone())
}
}
}
}
fn validate_allowed_method(&self, req: &Request) -> Result<(), CorsError> {
if let Some(hdr) = req.headers().get(header::ACCESS_CONTROL_REQUEST_METHOD) {
if let Ok(meth) = hdr.to_str() {
@@ -390,21 +416,9 @@ impl<S> Middleware<S> for Cors {
}).if_some(headers, |headers, resp| {
let _ =
resp.header(header::ACCESS_CONTROL_ALLOW_HEADERS, headers);
}).if_true(self.inner.origins.is_all(), |resp| {
if self.inner.send_wildcard {
resp.header(header::ACCESS_CONTROL_ALLOW_ORIGIN, "*");
} else {
let origin = req.headers().get(header::ORIGIN).unwrap();
resp.header(
header::ACCESS_CONTROL_ALLOW_ORIGIN,
origin.clone(),
);
}
}).if_true(self.inner.origins.is_some(), |resp| {
resp.header(
header::ACCESS_CONTROL_ALLOW_ORIGIN,
self.inner.origins_str.as_ref().unwrap().clone(),
);
}).if_some(self.access_control_allow_origin(&req), |origin, resp| {
let _ =
resp.header(header::ACCESS_CONTROL_ALLOW_ORIGIN, origin);
}).if_true(self.inner.supports_credentials, |resp| {
resp.header(header::ACCESS_CONTROL_ALLOW_CREDENTIALS, "true");
}).header(
@@ -430,25 +444,11 @@ impl<S> Middleware<S> for Cors {
fn response(
&self, req: &HttpRequest<S>, mut resp: HttpResponse,
) -> Result<Response> {
match self.inner.origins {
AllOrSome::All => {
if self.inner.send_wildcard {
resp.headers_mut().insert(
header::ACCESS_CONTROL_ALLOW_ORIGIN,
HeaderValue::from_static("*"),
);
} else if let Some(origin) = req.headers().get(header::ORIGIN) {
resp.headers_mut()
.insert(header::ACCESS_CONTROL_ALLOW_ORIGIN, origin.clone());
}
}
AllOrSome::Some(_) => {
resp.headers_mut().insert(
header::ACCESS_CONTROL_ALLOW_ORIGIN,
self.inner.origins_str.as_ref().unwrap().clone(),
);
}
}
if let Some(origin) = self.access_control_allow_origin(req) {
resp.headers_mut()
.insert(header::ACCESS_CONTROL_ALLOW_ORIGIN, origin.clone());
};
if let Some(ref expose) = self.inner.expose_hdrs {
resp.headers_mut().insert(
@@ -826,8 +826,8 @@ impl<S: 'static> CorsBuilder<S> {
if let AllOrSome::Some(ref origins) = cors.origins {
let s = origins
.iter()
.fold(String::new(), |s, v| s + &v.to_string());
cors.origins_str = Some(HeaderValue::try_from(s.as_str()).unwrap());
.fold(String::new(), |s, v| format!("{}, {}", s, v));
cors.origins_str = Some(HeaderValue::try_from(&s[2..]).unwrap());
}
if !self.expose_hdrs.is_empty() {
@@ -1122,15 +1122,21 @@ mod tests {
let cors = Cors::build()
.disable_vary_header()
.allowed_origin("https://www.example.com")
.allowed_origin("https://www.google.com")
.finish();
let resp: HttpResponse = HttpResponse::Ok().into();
let resp = cors.response(&req, resp).unwrap().response();
let origins_str = resp
.headers()
.get(header::ACCESS_CONTROL_ALLOW_ORIGIN)
.unwrap()
.to_str()
.unwrap();
assert_eq!(
&b"https://www.example.com"[..],
resp.headers()
.get(header::ACCESS_CONTROL_ALLOW_ORIGIN)
.unwrap()
.as_bytes()
"https://www.example.com",
origins_str
);
}
@@ -1167,4 +1173,80 @@ mod tests {
let response = srv.execute(request.send()).unwrap();
assert_eq!(response.status(), StatusCode::OK);
}
#[test]
fn test_multiple_origins() {
let cors = Cors::build()
.allowed_origin("https://example.com")
.allowed_origin("https://example.org")
.allowed_methods(vec![Method::GET])
.finish();
let req = TestRequest::with_header("Origin", "https://example.com")
.method(Method::GET)
.finish();
let resp: HttpResponse = HttpResponse::Ok().into();
let resp = cors.response(&req, resp).unwrap().response();
assert_eq!(
&b"https://example.com"[..],
resp.headers()
.get(header::ACCESS_CONTROL_ALLOW_ORIGIN)
.unwrap()
.as_bytes()
);
let req = TestRequest::with_header("Origin", "https://example.org")
.method(Method::GET)
.finish();
let resp: HttpResponse = HttpResponse::Ok().into();
let resp = cors.response(&req, resp).unwrap().response();
assert_eq!(
&b"https://example.org"[..],
resp.headers()
.get(header::ACCESS_CONTROL_ALLOW_ORIGIN)
.unwrap()
.as_bytes()
);
}
#[test]
fn test_multiple_origins_preflight() {
let cors = Cors::build()
.allowed_origin("https://example.com")
.allowed_origin("https://example.org")
.allowed_methods(vec![Method::GET])
.finish();
let req = TestRequest::with_header("Origin", "https://example.com")
.header(header::ACCESS_CONTROL_REQUEST_METHOD, "GET")
.method(Method::OPTIONS)
.finish();
let resp = cors.start(&req).ok().unwrap().response();
assert_eq!(
&b"https://example.com"[..],
resp.headers()
.get(header::ACCESS_CONTROL_ALLOW_ORIGIN)
.unwrap()
.as_bytes()
);
let req = TestRequest::with_header("Origin", "https://example.org")
.header(header::ACCESS_CONTROL_REQUEST_METHOD, "GET")
.method(Method::OPTIONS)
.finish();
let resp = cors.start(&req).ok().unwrap().response();
assert_eq!(
&b"https://example.org"[..],
resp.headers()
.get(header::ACCESS_CONTROL_ALLOW_ORIGIN)
.unwrap()
.as_bytes()
);
}
}


@@ -76,7 +76,7 @@ impl ResponseError for CsrfError {
}
fn uri_origin(uri: &Uri) -> Option<String> {
match (uri.scheme_part(), uri.host(), uri.port()) {
match (uri.scheme_part(), uri.host(), uri.port_part().map(|port| port.as_u16())) {
(Some(scheme), Some(host), Some(port)) => {
Some(format!("{}://{}:{}", scheme, host, port))
}


@@ -48,7 +48,7 @@
//! ```
use std::rc::Rc;
use cookie::{Cookie, CookieJar, Key};
use cookie::{Cookie, CookieJar, Key, SameSite};
use futures::future::{err as FutErr, ok as FutOk, FutureResult};
use futures::Future;
use time::Duration;
@@ -237,6 +237,7 @@ struct CookieIdentityInner {
domain: Option<String>,
secure: bool,
max_age: Option<Duration>,
same_site: Option<SameSite>,
}
impl CookieIdentityInner {
@@ -248,6 +249,7 @@ impl CookieIdentityInner {
domain: None,
secure: true,
max_age: None,
same_site: None,
}
}
@@ -268,6 +270,10 @@ impl CookieIdentityInner {
cookie.set_max_age(max_age);
}
if let Some(same_site) = self.same_site {
cookie.set_same_site(same_site);
}
let mut jar = CookieJar::new();
if some {
jar.private(&self.key).add(cookie);
@@ -370,6 +376,12 @@ impl CookieIdentityPolicy {
Rc::get_mut(&mut self.0).unwrap().max_age = Some(value);
self
}
/// Sets the `same_site` field in the session cookie being built.
pub fn same_site(mut self, same_site: SameSite) -> Self {
Rc::get_mut(&mut self.0).unwrap().same_site = Some(same_site);
self
}
}
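A usage sketch for the new same_site option, assuming the 0.7 identity middleware API and that SameSite is taken from the `cookie` crate this module already depends on; the key and cookie name are placeholders:

use actix_web::middleware::identity::{CookieIdentityPolicy, IdentityService};
use actix_web::App;
use cookie::SameSite; // assumed import from the `cookie` crate dependency

fn main() {
    let _app = App::new().middleware(IdentityService::new(
        CookieIdentityPolicy::new(&[0; 32]) // placeholder key; use a real secret
            .name("auth-cookie")
            .secure(true)
            .same_site(SameSite::Strict), // new builder method added in this change
    ));
}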
impl<S> IdentityPolicy<S> for CookieIdentityPolicy {


@@ -33,7 +33,8 @@
//!
//! ```rust
//! # extern crate actix_web;
//! use actix_web::{actix, server, App, HttpRequest, Result};
//! # extern crate actix;
//! use actix_web::{server, App, HttpRequest, Result};
//! use actix_web::middleware::session::{RequestSession, SessionStorage, CookieSessionBackend};
//!
//! fn index(req: HttpRequest) -> Result<&'static str> {


@@ -441,13 +441,13 @@ where
impl<S> fmt::Debug for Field<S> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let res = writeln!(f, "\nMultipartField: {}", self.ct);
let _ = writeln!(f, " boundary: {}", self.inner.borrow().boundary);
let _ = writeln!(f, " headers:");
writeln!(f, "\nMultipartField: {}", self.ct)?;
writeln!(f, " boundary: {}", self.inner.borrow().boundary)?;
writeln!(f, " headers:")?;
for (key, val) in self.headers.iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val);
writeln!(f, " {:?}: {:?}", key, val)?;
}
res
Ok(())
}
}


@@ -8,7 +8,7 @@ use http::StatusCode;
use smallvec::SmallVec;
use error::{InternalError, ResponseError, UriSegmentError};
use uri::Url;
use uri::{Url, RESERVED_QUOTER};
/// A trait to abstract the idea of creating a new instance of a type from a
/// path parameter.
@@ -103,6 +103,17 @@ impl Params {
}
}
/// Get URL-decoded matched parameter by name without type conversion
pub fn get_decoded(&self, key: &str) -> Option<String> {
self.get(key).map(|value| {
if let Some(ref mut value) = RESERVED_QUOTER.requote(value.as_bytes()) {
Rc::make_mut(value).to_string()
} else {
value.to_string()
}
})
}
/// Get unprocessed part of path
pub fn unprocessed(&self) -> &str {
&self.url.path()[(self.tail as usize)..]
@@ -300,4 +311,24 @@ mod tests {
Ok(PathBuf::from_iter(vec!["seg2"]))
);
}
#[test]
fn test_get_param_by_name() {
let mut params = Params::new();
params.add_static("item1", "path");
params.add_static("item2", "http%3A%2F%2Flocalhost%3A80%2Ffoo");
assert_eq!(params.get("item0"), None);
assert_eq!(params.get_decoded("item0"), None);
assert_eq!(params.get("item1"), Some("path"));
assert_eq!(params.get_decoded("item1"), Some("path".to_string()));
assert_eq!(
params.get("item2"),
Some("http%3A%2F%2Flocalhost%3A80%2Ffoo")
);
assert_eq!(
params.get_decoded("item2"),
Some("http://localhost:80/foo".to_string())
);
}
}


@@ -1,6 +1,8 @@
//! Payload stream
use bytes::{Bytes, BytesMut};
use futures::task::{current as current_task, Task};
#[cfg(not(test))]
use futures::task::current as current_task;
use futures::task::Task;
use futures::{Async, Poll, Stream};
use std::cell::RefCell;
use std::cmp;


@@ -551,12 +551,12 @@ impl<S: 'static, H> ProcessResponse<S, H> {
if self.resp.as_ref().unwrap().status().is_server_error()
{
error!(
"Error occured during request handling, status: {} {}",
"Error occurred during request handling, status: {} {}",
self.resp.as_ref().unwrap().status(), err
);
} else {
warn!(
"Error occured during request handling: {}",
"Error occurred during request handling: {}",
err
);
}


@@ -107,6 +107,12 @@ impl<S: 'static> Resource<S> {
self.routes.last_mut().unwrap().filter(pred::Post())
}
/// Register a new `PATCH` route.
pub fn patch(&mut self) -> &mut Route<S> {
self.routes.push(Route::default());
self.routes.last_mut().unwrap().filter(pred::Patch())
}
/// Register a new `PUT` route.
pub fn put(&mut self) -> &mut Route<S> {
self.routes.push(Route::default());
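A usage sketch for the new patch() registration, assuming the 0.7 App::resource closure API; the path and handler are placeholders:

use actix_web::{App, HttpRequest, HttpResponse};

fn update(_req: &HttpRequest) -> HttpResponse {
    HttpResponse::Ok().body("patched")
}

fn main() {
    let _app = App::new().resource("/items/{id}", |r| {
        // only PATCH requests reach `update`
        r.patch().f(update);
    });
}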


@@ -57,7 +57,7 @@ impl<S: 'static> Route<S> {
pub(crate) fn compose(
&self, req: HttpRequest<S>, mws: Rc<Vec<Box<Middleware<S>>>>,
) -> AsyncResult<HttpResponse> {
AsyncResult::async(Box::new(Compose::new(req, mws, self.handler.clone())))
AsyncResult::future(Box::new(Compose::new(req, mws, self.handler.clone())))
}
/// Add match predicate to route.
@@ -134,8 +134,7 @@ impl<S: 'static> Route<S> {
/// }
/// ```
///
/// It is possible to use tuples for specifing multiple extractors for one
/// handler function.
/// It is possible to use multiple extractors for one handler function.
///
/// ```rust
/// # extern crate bytes;
@@ -152,9 +151,9 @@ impl<S: 'static> Route<S> {
///
/// /// extract path info using serde
/// fn index(
/// info: (Path<Info>, Query<HashMap<String, String>>, Json<Info>),
/// path: Path<Info>, query: Query<HashMap<String, String>>, body: Json<Info>,
/// ) -> Result<String> {
/// Ok(format!("Welcome {}!", info.0.username))
/// Ok(format!("Welcome {}!", path.username))
/// }
///
/// fn main() {


@@ -832,7 +832,7 @@ impl ResourceDef {
}).expect("malformed param");
let (mut param, rem) = pattern.split_at(close_idx + 1);
param = &param[1..param.len() - 1]; // Remove outer brackets
let (name, pattern) = match param.find(":") {
let (name, pattern) = match param.find(':') {
Some(idx) => {
let (name, pattern) = param.split_at(idx);
(name, &pattern[1..])
@@ -849,7 +849,7 @@ impl ResourceDef {
fn parse(
mut pattern: &str, for_prefix: bool,
) -> (String, Vec<PatternElement>, bool, usize) {
if pattern.find("{").is_none() {
if pattern.find('{').is_none() {
return (
String::from(pattern),
vec![PatternElement::Str(String::from(pattern))],
@@ -861,7 +861,7 @@ impl ResourceDef {
let mut elems = Vec::new();
let mut re = String::from("^");
while let Some(idx) = pattern.find("{") {
while let Some(idx) = pattern.find('{') {
let (prefix, rem) = pattern.split_at(idx);
elems.push(PatternElement::Str(String::from(prefix)));
re.push_str(&escape(prefix));


@@ -59,7 +59,10 @@ pub struct Scope<S> {
middlewares: Rc<Vec<Box<Middleware<S>>>>,
}
#[cfg_attr(feature = "cargo-clippy", allow(new_without_default_derive))]
#[cfg_attr(
feature = "cargo-clippy",
allow(new_without_default_derive)
)]
impl<S: 'static> Scope<S> {
/// Create a new scope
pub fn new(path: &str) -> Scope<S> {
@@ -353,7 +356,7 @@ impl<S: 'static> RouteHandler<S> for Scope<S> {
if self.middlewares.is_empty() {
self.router.handle(&req2)
} else {
AsyncResult::async(Box::new(Compose::new(
AsyncResult::future(Box::new(Compose::new(
req2,
Rc::clone(&self.router),
Rc::clone(&self.middlewares),
@@ -821,11 +824,9 @@ mod tests {
scope
.route("/path1", Method::GET, |_: HttpRequest<_>| {
HttpResponse::Ok()
}).route(
"/path1",
Method::DELETE,
|_: HttpRequest<_>| HttpResponse::Ok(),
)
}).route("/path1", Method::DELETE, |_: HttpRequest<_>| {
HttpResponse::Ok()
})
}).finish();
let req = TestRequest::with_uri("/app/path1").request();


@@ -1,472 +0,0 @@
use std::sync::mpsc as sync_mpsc;
use std::time::{Duration, Instant};
use std::{io, net, thread};
use futures::{sync::mpsc, Future};
use mio;
use slab::Slab;
use tokio_timer::Delay;
use actix::{msgs::Execute, Arbiter, System};
use super::server::ServerCommand;
use super::worker::{Conn, WorkerClient};
use super::Token;
pub(crate) enum Command {
Pause,
Resume,
Stop,
Worker(WorkerClient),
}
struct ServerSocketInfo {
addr: net::SocketAddr,
token: Token,
handler: Token,
sock: mio::net::TcpListener,
timeout: Option<Instant>,
}
#[derive(Clone)]
pub(crate) struct AcceptNotify(mio::SetReadiness);
impl AcceptNotify {
pub(crate) fn new(ready: mio::SetReadiness) -> Self {
AcceptNotify(ready)
}
pub(crate) fn notify(&self) {
let _ = self.0.set_readiness(mio::Ready::readable());
}
}
impl Default for AcceptNotify {
fn default() -> Self {
AcceptNotify::new(mio::Registration::new2().1)
}
}
pub(crate) struct AcceptLoop {
cmd_reg: Option<mio::Registration>,
cmd_ready: mio::SetReadiness,
notify_reg: Option<mio::Registration>,
notify_ready: mio::SetReadiness,
tx: sync_mpsc::Sender<Command>,
rx: Option<sync_mpsc::Receiver<Command>>,
srv: Option<(
mpsc::UnboundedSender<ServerCommand>,
mpsc::UnboundedReceiver<ServerCommand>,
)>,
}
impl AcceptLoop {
pub fn new() -> AcceptLoop {
let (tx, rx) = sync_mpsc::channel();
let (cmd_reg, cmd_ready) = mio::Registration::new2();
let (notify_reg, notify_ready) = mio::Registration::new2();
AcceptLoop {
tx,
cmd_ready,
cmd_reg: Some(cmd_reg),
notify_ready,
notify_reg: Some(notify_reg),
rx: Some(rx),
srv: Some(mpsc::unbounded()),
}
}
pub fn send(&self, msg: Command) {
let _ = self.tx.send(msg);
let _ = self.cmd_ready.set_readiness(mio::Ready::readable());
}
pub fn get_notify(&self) -> AcceptNotify {
AcceptNotify::new(self.notify_ready.clone())
}
pub(crate) fn start(
&mut self, socks: Vec<Vec<(Token, net::TcpListener)>>,
workers: Vec<WorkerClient>,
) -> mpsc::UnboundedReceiver<ServerCommand> {
let (tx, rx) = self.srv.take().expect("Can not re-use AcceptInfo");
Accept::start(
self.rx.take().expect("Can not re-use AcceptInfo"),
self.cmd_reg.take().expect("Can not re-use AcceptInfo"),
self.notify_reg.take().expect("Can not re-use AcceptInfo"),
socks,
tx,
workers,
);
rx
}
}
struct Accept {
poll: mio::Poll,
rx: sync_mpsc::Receiver<Command>,
sockets: Slab<ServerSocketInfo>,
workers: Vec<WorkerClient>,
srv: mpsc::UnboundedSender<ServerCommand>,
timer: (mio::Registration, mio::SetReadiness),
next: usize,
backpressure: bool,
}
const DELTA: usize = 100;
const CMD: mio::Token = mio::Token(0);
const TIMER: mio::Token = mio::Token(1);
const NOTIFY: mio::Token = mio::Token(2);
/// This function defines errors that are per-connection. Which basically
/// means that if we get this error from `accept()` system call it means
/// next connection might be ready to be accepted.
///
/// All other errors will incur a timeout before next `accept()` is performed.
/// The timeout is useful to handle resource exhaustion errors like ENFILE
/// and EMFILE. Otherwise, could enter into tight loop.
fn connection_error(e: &io::Error) -> bool {
e.kind() == io::ErrorKind::ConnectionRefused
|| e.kind() == io::ErrorKind::ConnectionAborted
|| e.kind() == io::ErrorKind::ConnectionReset
}
impl Accept {
#![cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
pub(crate) fn start(
rx: sync_mpsc::Receiver<Command>, cmd_reg: mio::Registration,
notify_reg: mio::Registration, socks: Vec<Vec<(Token, net::TcpListener)>>,
srv: mpsc::UnboundedSender<ServerCommand>, workers: Vec<WorkerClient>,
) {
let sys = System::current();
// start accept thread
let _ = thread::Builder::new()
.name("actix-web accept loop".to_owned())
.spawn(move || {
System::set_current(sys);
let mut accept = Accept::new(rx, socks, workers, srv);
// Start listening for incoming commands
if let Err(err) = accept.poll.register(
&cmd_reg,
CMD,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
// Start listening for notify updates
if let Err(err) = accept.poll.register(
&notify_reg,
NOTIFY,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
accept.poll();
});
}
fn new(
rx: sync_mpsc::Receiver<Command>, socks: Vec<Vec<(Token, net::TcpListener)>>,
workers: Vec<WorkerClient>, srv: mpsc::UnboundedSender<ServerCommand>,
) -> Accept {
// Create a poll instance
let poll = match mio::Poll::new() {
Ok(poll) => poll,
Err(err) => panic!("Can not create mio::Poll: {}", err),
};
// Start accept
let mut sockets = Slab::new();
for (idx, srv_socks) in socks.into_iter().enumerate() {
for (hnd_token, lst) in srv_socks {
let addr = lst.local_addr().unwrap();
let server = mio::net::TcpListener::from_std(lst)
.expect("Can not create mio::net::TcpListener");
let entry = sockets.vacant_entry();
let token = entry.key();
// Start listening for incoming connections
if let Err(err) = poll.register(
&server,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register io: {}", err);
}
entry.insert(ServerSocketInfo {
addr,
token: hnd_token,
handler: Token(idx),
sock: server,
timeout: None,
});
}
}
// Timer
let (tm, tmr) = mio::Registration::new2();
if let Err(err) =
poll.register(&tm, TIMER, mio::Ready::readable(), mio::PollOpt::edge())
{
panic!("Can not register Registration: {}", err);
}
Accept {
poll,
rx,
sockets,
workers,
srv,
next: 0,
timer: (tm, tmr),
backpressure: false,
}
}
fn poll(&mut self) {
// Create storage for events
let mut events = mio::Events::with_capacity(128);
loop {
if let Err(err) = self.poll.poll(&mut events, None) {
panic!("Poll error: {}", err);
}
for event in events.iter() {
let token = event.token();
match token {
CMD => if !self.process_cmd() {
return;
},
TIMER => self.process_timer(),
NOTIFY => self.backpressure(false),
_ => {
let token = usize::from(token);
if token < DELTA {
continue;
}
self.accept(token - DELTA);
}
}
}
}
}
fn process_timer(&mut self) {
let now = Instant::now();
for (token, info) in self.sockets.iter_mut() {
if let Some(inst) = info.timeout.take() {
if now > inst {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not register server socket {}", err);
} else {
info!("Resume accepting connections on {}", info.addr);
}
} else {
info.timeout = Some(inst);
}
}
}
}
fn process_cmd(&mut self) -> bool {
loop {
match self.rx.try_recv() {
Ok(cmd) => match cmd {
Command::Pause => {
for (_, info) in self.sockets.iter_mut() {
if let Err(err) = self.poll.deregister(&info.sock) {
error!("Can not deregister server socket {}", err);
} else {
info!("Paused accepting connections on {}", info.addr);
}
}
}
Command::Resume => {
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!(
"Accepting connections on {} has been resumed",
info.addr
);
}
}
}
Command::Stop => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
Command::Worker(worker) => {
self.backpressure(false);
self.workers.push(worker);
}
},
Err(err) => match err {
sync_mpsc::TryRecvError::Empty => break,
sync_mpsc::TryRecvError::Disconnected => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
},
}
}
true
}
fn backpressure(&mut self, on: bool) {
if self.backpressure {
if !on {
self.backpressure = false;
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!("Accepting connections on {} has been resumed", info.addr);
}
}
}
} else if on {
self.backpressure = true;
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
}
}
fn accept_one(&mut self, mut msg: Conn<net::TcpStream>) {
if self.backpressure {
while !self.workers.is_empty() {
match self.workers[self.next].send(msg) {
Ok(_) => (),
Err(err) => {
let _ = self.srv.unbounded_send(ServerCommand::WorkerDied(
self.workers[self.next].idx,
));
msg = err.into_inner();
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
error!("No workers");
return;
} else if self.workers.len() <= self.next {
self.next = 0;
}
continue;
}
}
self.next = (self.next + 1) % self.workers.len();
break;
}
} else {
let mut idx = 0;
while idx < self.workers.len() {
idx += 1;
if self.workers[self.next].available() {
match self.workers[self.next].send(msg) {
Ok(_) => {
self.next = (self.next + 1) % self.workers.len();
return;
}
Err(err) => {
let _ = self.srv.unbounded_send(ServerCommand::WorkerDied(
self.workers[self.next].idx,
));
msg = err.into_inner();
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
error!("No workers");
self.backpressure(true);
return;
} else if self.workers.len() <= self.next {
self.next = 0;
}
continue;
}
}
}
self.next = (self.next + 1) % self.workers.len();
}
// enable backpressure
self.backpressure(true);
self.accept_one(msg);
}
}
fn accept(&mut self, token: usize) {
loop {
let msg = if let Some(info) = self.sockets.get_mut(token) {
match info.sock.accept_std() {
Ok((io, addr)) => Conn {
io,
token: info.token,
handler: info.handler,
peer: Some(addr),
},
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
error!("Error accepting connection: {}", e);
if let Err(err) = self.poll.deregister(&info.sock) {
error!("Can not deregister server socket {}", err);
}
// sleep after error
info.timeout = Some(Instant::now() + Duration::from_millis(500));
let r = self.timer.1.clone();
System::current().arbiter().do_send(Execute::new(
move || -> Result<(), ()> {
Arbiter::spawn(
Delay::new(
Instant::now() + Duration::from_millis(510),
).map_err(|_| ())
.and_then(move |_| {
let _ = r.set_readiness(mio::Ready::readable());
Ok(())
}),
);
Ok(())
},
));
return;
}
}
} else {
return;
};
self.accept_one(msg);
}
}
}

src/server/acceptor.rs (new file, 383 lines)

@@ -0,0 +1,383 @@
use std::time::Duration;
use std::{fmt, net};
use actix_net::server::ServerMessage;
use actix_net::service::{NewService, Service};
use futures::future::{err, ok, Either, FutureResult};
use futures::{Async, Future, Poll};
use tokio_reactor::Handle;
use tokio_tcp::TcpStream;
use tokio_timer::{sleep, Delay};
use super::error::AcceptorError;
use super::IoStream;
/// This trait indicates types that can create an acceptor service for the http server.
pub trait AcceptorServiceFactory: Send + Clone + 'static {
type Io: IoStream + Send;
type NewService: NewService<Request = TcpStream, Response = Self::Io>;
fn create(&self) -> Self::NewService;
}
impl<F, T> AcceptorServiceFactory for F
where
F: Fn() -> T + Send + Clone + 'static,
T::Response: IoStream + Send,
T: NewService<Request = TcpStream>,
T::InitError: fmt::Debug,
{
type Io = T::Response;
type NewService = T;
fn create(&self) -> T {
(self)()
}
}
#[derive(Clone)]
/// Default acceptor service; converts a `TcpStream` to a `tokio_tcp::TcpStream`
pub(crate) struct DefaultAcceptor;
impl AcceptorServiceFactory for DefaultAcceptor {
type Io = TcpStream;
type NewService = DefaultAcceptor;
fn create(&self) -> Self::NewService {
DefaultAcceptor
}
}
impl NewService for DefaultAcceptor {
type Request = TcpStream;
type Response = TcpStream;
type Error = ();
type InitError = ();
type Service = DefaultAcceptor;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
ok(DefaultAcceptor)
}
}
impl Service for DefaultAcceptor {
type Request = TcpStream;
type Response = TcpStream;
type Error = ();
type Future = FutureResult<Self::Response, Self::Error>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: Self::Request) -> Self::Future {
ok(req)
}
}
pub(crate) struct TcpAcceptor<T> {
inner: T,
}
impl<T, E> TcpAcceptor<T>
where
T: NewService<Request = TcpStream, Error = AcceptorError<E>>,
T::InitError: fmt::Debug,
{
pub(crate) fn new(inner: T) -> Self {
TcpAcceptor { inner }
}
}
impl<T, E> NewService for TcpAcceptor<T>
where
T: NewService<Request = TcpStream, Error = AcceptorError<E>>,
T::InitError: fmt::Debug,
{
type Request = net::TcpStream;
type Response = T::Response;
type Error = AcceptorError<E>;
type InitError = T::InitError;
type Service = TcpAcceptorService<T::Service>;
type Future = TcpAcceptorResponse<T>;
fn new_service(&self) -> Self::Future {
TcpAcceptorResponse {
fut: self.inner.new_service(),
}
}
}
pub(crate) struct TcpAcceptorResponse<T>
where
T: NewService<Request = TcpStream>,
T::InitError: fmt::Debug,
{
fut: T::Future,
}
impl<T> Future for TcpAcceptorResponse<T>
where
T: NewService<Request = TcpStream>,
T::InitError: fmt::Debug,
{
type Item = TcpAcceptorService<T::Service>;
type Error = T::InitError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll() {
Ok(Async::NotReady) => Ok(Async::NotReady),
Ok(Async::Ready(service)) => {
Ok(Async::Ready(TcpAcceptorService { inner: service }))
}
Err(e) => {
error!("Can not create accetor service: {:?}", e);
Err(e)
}
}
}
}
pub(crate) struct TcpAcceptorService<T> {
inner: T,
}
impl<T, E> Service for TcpAcceptorService<T>
where
T: Service<Request = TcpStream, Error = AcceptorError<E>>,
{
type Request = net::TcpStream;
type Response = T::Response;
type Error = AcceptorError<E>;
type Future = Either<T::Future, FutureResult<Self::Response, Self::Error>>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.inner.poll_ready()
}
fn call(&mut self, req: Self::Request) -> Self::Future {
let stream = TcpStream::from_std(req, &Handle::default()).map_err(|e| {
error!("Can not convert to an async tcp stream: {}", e);
AcceptorError::Io(e)
});
match stream {
Ok(stream) => Either::A(self.inner.call(stream)),
Err(e) => Either::B(err(e)),
}
}
}
#[doc(hidden)]
/// Acceptor timeout middleware
///
/// Applies a timeout to request processing.
pub struct AcceptorTimeout<T> {
inner: T,
timeout: Duration,
}
impl<T: NewService> AcceptorTimeout<T> {
/// Create a new `AcceptorTimeout` instance. The timeout is in milliseconds.
pub fn new(timeout: u64, inner: T) -> Self {
Self {
inner,
timeout: Duration::from_millis(timeout),
}
}
}
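Inside this module, wrapping the plain TCP acceptor defined above would look like the sketch below; the 5 000 ms figure is an arbitrary placeholder:

// Sketch: apply the handshake timeout to the default TCP acceptor.
fn timed_default_acceptor() -> AcceptorTimeout<DefaultAcceptor> {
    AcceptorTimeout::new(5_000, DefaultAcceptor)
}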
impl<T: NewService> NewService for AcceptorTimeout<T> {
type Request = T::Request;
type Response = T::Response;
type Error = AcceptorError<T::Error>;
type InitError = T::InitError;
type Service = AcceptorTimeoutService<T::Service>;
type Future = AcceptorTimeoutFut<T>;
fn new_service(&self) -> Self::Future {
AcceptorTimeoutFut {
fut: self.inner.new_service(),
timeout: self.timeout,
}
}
}
#[doc(hidden)]
pub struct AcceptorTimeoutFut<T: NewService> {
fut: T::Future,
timeout: Duration,
}
impl<T: NewService> Future for AcceptorTimeoutFut<T> {
type Item = AcceptorTimeoutService<T::Service>;
type Error = T::InitError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let inner = try_ready!(self.fut.poll());
Ok(Async::Ready(AcceptorTimeoutService {
inner,
timeout: self.timeout,
}))
}
}
#[doc(hidden)]
/// Acceptor timeout service
///
/// Applies a timeout to request processing.
pub struct AcceptorTimeoutService<T> {
inner: T,
timeout: Duration,
}
impl<T: Service> Service for AcceptorTimeoutService<T> {
type Request = T::Request;
type Response = T::Response;
type Error = AcceptorError<T::Error>;
type Future = AcceptorTimeoutResponse<T>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.inner.poll_ready().map_err(AcceptorError::Service)
}
fn call(&mut self, req: Self::Request) -> Self::Future {
AcceptorTimeoutResponse {
fut: self.inner.call(req),
sleep: sleep(self.timeout),
}
}
}
#[doc(hidden)]
pub struct AcceptorTimeoutResponse<T: Service> {
fut: T::Future,
sleep: Delay,
}
impl<T: Service> Future for AcceptorTimeoutResponse<T> {
type Item = T::Response;
type Error = AcceptorError<T::Error>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll().map_err(AcceptorError::Service)? {
Async::NotReady => match self.sleep.poll() {
Err(_) => Err(AcceptorError::Timeout),
Ok(Async::Ready(_)) => Err(AcceptorError::Timeout),
Ok(Async::NotReady) => Ok(Async::NotReady),
},
Async::Ready(resp) => Ok(Async::Ready(resp)),
}
}
}
pub(crate) struct ServerMessageAcceptor<T> {
inner: T,
}
impl<T> ServerMessageAcceptor<T>
where
T: NewService<Request = net::TcpStream>,
{
pub(crate) fn new(inner: T) -> Self {
ServerMessageAcceptor { inner }
}
}
impl<T> NewService for ServerMessageAcceptor<T>
where
T: NewService<Request = net::TcpStream>,
{
type Request = ServerMessage;
type Response = ();
type Error = T::Error;
type InitError = T::InitError;
type Service = ServerMessageAcceptorService<T::Service>;
type Future = ServerMessageAcceptorResponse<T>;
fn new_service(&self) -> Self::Future {
ServerMessageAcceptorResponse {
fut: self.inner.new_service(),
}
}
}
pub(crate) struct ServerMessageAcceptorResponse<T>
where
T: NewService<Request = net::TcpStream>,
{
fut: T::Future,
}
impl<T> Future for ServerMessageAcceptorResponse<T>
where
T: NewService<Request = net::TcpStream>,
{
type Item = ServerMessageAcceptorService<T::Service>;
type Error = T::InitError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll()? {
Async::NotReady => Ok(Async::NotReady),
Async::Ready(service) => Ok(Async::Ready(ServerMessageAcceptorService {
inner: service,
})),
}
}
}
pub(crate) struct ServerMessageAcceptorService<T> {
inner: T,
}
impl<T> Service for ServerMessageAcceptorService<T>
where
T: Service<Request = net::TcpStream>,
{
type Request = ServerMessage;
type Response = ();
type Error = T::Error;
type Future =
Either<ServerMessageAcceptorServiceFut<T>, FutureResult<(), Self::Error>>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.inner.poll_ready()
}
fn call(&mut self, req: Self::Request) -> Self::Future {
match req {
ServerMessage::Connect(stream) => {
Either::A(ServerMessageAcceptorServiceFut {
fut: self.inner.call(stream),
})
}
ServerMessage::Shutdown(_) => Either::B(ok(())),
ServerMessage::ForceShutdown => {
// self.settings
// .head()
// .traverse(|proto: &mut HttpProtocol<TcpStream, H>| proto.shutdown());
Either::B(ok(()))
}
}
}
}
pub(crate) struct ServerMessageAcceptorServiceFut<T: Service> {
fut: T::Future,
}
impl<T> Future for ServerMessageAcceptorServiceFut<T>
where
T: Service,
{
type Item = ();
type Error = T::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll()? {
Async::NotReady => Ok(Async::NotReady),
Async::Ready(_) => Ok(Async::Ready(())),
}
}
}

src/server/builder.rs (new file, 134 lines)

@@ -0,0 +1,134 @@
use std::{fmt, net};
use actix_net::either::Either;
use actix_net::server::{Server, ServiceFactory};
use actix_net::service::{NewService, NewServiceExt};
use super::acceptor::{
AcceptorServiceFactory, AcceptorTimeout, ServerMessageAcceptor, TcpAcceptor,
};
use super::error::AcceptorError;
use super::handler::IntoHttpHandler;
use super::service::{HttpService, StreamConfiguration};
use super::settings::{ServerSettings, ServiceConfig};
use super::KeepAlive;
pub(crate) trait ServiceProvider {
fn register(
&self,
server: Server,
lst: net::TcpListener,
host: String,
addr: net::SocketAddr,
keep_alive: KeepAlive,
secure: bool,
client_timeout: u64,
client_shutdown: u64,
) -> Server;
}
/// Utility type that builds complete http pipeline
pub(crate) struct HttpServiceBuilder<F, H, A>
where
F: Fn() -> H + Send + Clone,
{
factory: F,
acceptor: A,
}
impl<F, H, A> HttpServiceBuilder<F, H, A>
where
F: Fn() -> H + Send + Clone + 'static,
H: IntoHttpHandler,
A: AcceptorServiceFactory,
<A::NewService as NewService>::InitError: fmt::Debug,
{
/// Create http service builder
pub fn new(factory: F, acceptor: A) -> Self {
Self { factory, acceptor }
}
fn finish(
&self,
host: String,
addr: net::SocketAddr,
keep_alive: KeepAlive,
secure: bool,
client_timeout: u64,
client_shutdown: u64,
) -> impl ServiceFactory {
let factory = self.factory.clone();
let acceptor = self.acceptor.clone();
move || {
let app = (factory)().into_handler();
let settings = ServiceConfig::new(
app,
keep_alive,
client_timeout,
client_shutdown,
ServerSettings::new(addr, &host, false),
);
if secure {
Either::B(ServerMessageAcceptor::new(
TcpAcceptor::new(AcceptorTimeout::new(
client_timeout,
acceptor.create(),
)).map_err(|_| ())
.map_init_err(|_| ())
.and_then(StreamConfiguration::new().nodelay(true))
.and_then(
HttpService::new(settings)
.map_init_err(|_| ())
.map_err(|_| ()),
),
))
} else {
Either::A(ServerMessageAcceptor::new(
TcpAcceptor::new(acceptor.create().map_err(AcceptorError::Service))
.map_err(|_| ())
.map_init_err(|_| ())
.and_then(StreamConfiguration::new().nodelay(true))
.and_then(
HttpService::new(settings)
.map_init_err(|_| ())
.map_err(|_| ()),
),
))
}
}
}
}
impl<F, H, A> ServiceProvider for HttpServiceBuilder<F, H, A>
where
F: Fn() -> H + Send + Clone + 'static,
A: AcceptorServiceFactory,
<A::NewService as NewService>::InitError: fmt::Debug,
H: IntoHttpHandler,
{
fn register(
&self,
server: Server,
lst: net::TcpListener,
host: String,
addr: net::SocketAddr,
keep_alive: KeepAlive,
secure: bool,
client_timeout: u64,
client_shutdown: u64,
) -> Server {
server.listen2(
"actix-web",
lst,
self.finish(
host,
addr,
keep_alive,
secure,
client_timeout,
client_shutdown,
),
)
}
}


@@ -1,22 +1,43 @@
use std::net::{Shutdown, SocketAddr};
use std::rc::Rc;
use std::{io, ptr, time};
use std::net::Shutdown;
use std::{io, mem, time};
use bytes::{Buf, BufMut, BytesMut};
use futures::{Async, Future, Poll};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_timer::Delay;
use super::settings::WorkerSettings;
use super::{h1, h2, ConnectionTag, HttpHandler, IoStream};
use super::error::HttpDispatchError;
use super::settings::ServiceConfig;
use super::{h1, h2, HttpHandler, IoStream};
use http::StatusCode;
const HTTP2_PREFACE: [u8; 14] = *b"PRI * HTTP/2.0";
enum HttpProtocol<T: IoStream, H: HttpHandler + 'static> {
H1(h1::Http1<T, H>),
pub(crate) enum HttpProtocol<T: IoStream, H: HttpHandler + 'static> {
H1(h1::Http1Dispatcher<T, H>),
H2(h2::Http2<T, H>),
Unknown(Rc<WorkerSettings<H>>, Option<SocketAddr>, T, BytesMut),
Unknown(ServiceConfig<H>, T, BytesMut),
None,
}
// impl<T: IoStream, H: HttpHandler + 'static> HttpProtocol<T, H> {
// fn shutdown_(&mut self) {
// match self {
// HttpProtocol::H1(ref mut h1) => {
// let io = h1.io();
// let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0)));
// let _ = IoStream::shutdown(io, Shutdown::Both);
// }
// HttpProtocol::H2(ref mut h2) => h2.shutdown(),
// HttpProtocol::Unknown(_, io, _) => {
// let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0)));
// let _ = IoStream::shutdown(io, Shutdown::Both);
// }
// HttpProtocol::None => (),
// }
// }
// }
enum ProtocolKind {
Http1,
Http2,
@@ -28,9 +49,8 @@ where
T: IoStream,
H: HttpHandler + 'static,
{
proto: Option<HttpProtocol<T, H>>,
node: Option<Node<HttpChannel<T, H>>>,
_tag: ConnectionTag,
proto: HttpProtocol<T, H>,
ka_timeout: Option<Delay>,
}
impl<T, H> HttpChannel<T, H>
@@ -38,32 +58,12 @@ where
T: IoStream,
H: HttpHandler + 'static,
{
pub(crate) fn new(
settings: Rc<WorkerSettings<H>>, io: T, peer: Option<SocketAddr>,
) -> HttpChannel<T, H> {
let _tag = settings.connection();
pub(crate) fn new(settings: ServiceConfig<H>, io: T) -> HttpChannel<T, H> {
let ka_timeout = settings.client_timer();
HttpChannel {
_tag,
node: None,
proto: Some(HttpProtocol::Unknown(
settings,
peer,
io,
BytesMut::with_capacity(8192),
)),
}
}
fn shutdown(&mut self) {
match self.proto {
Some(HttpProtocol::H1(ref mut h1)) => {
let io = h1.io();
let _ = IoStream::set_linger(io, Some(time::Duration::new(0, 0)));
let _ = IoStream::shutdown(io, Shutdown::Both);
}
Some(HttpProtocol::H2(ref mut h2)) => h2.shutdown(),
_ => (),
ka_timeout,
proto: HttpProtocol::Unknown(settings, io, BytesMut::with_capacity(8192)),
}
}
}
@@ -74,53 +74,38 @@ where
H: HttpHandler + 'static,
{
type Item = ();
type Error = ();
type Error = HttpDispatchError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if self.node.is_some() {
let el = self as *mut _;
self.node = Some(Node::new(el));
let _ = match self.proto {
Some(HttpProtocol::H1(ref mut h1)) => {
self.node.as_mut().map(|n| h1.settings().head().insert(n))
// keep-alive timer
if self.ka_timeout.is_some() {
match self.ka_timeout.as_mut().unwrap().poll() {
Ok(Async::Ready(_)) => {
trace!("Slow request timed out, close connection");
let proto = mem::replace(&mut self.proto, HttpProtocol::None);
if let HttpProtocol::Unknown(settings, io, buf) = proto {
self.proto = HttpProtocol::H1(h1::Http1Dispatcher::for_error(
settings,
io,
StatusCode::REQUEST_TIMEOUT,
self.ka_timeout.take(),
buf,
));
return self.poll();
}
return Ok(Async::Ready(()));
}
Some(HttpProtocol::H2(ref mut h2)) => {
self.node.as_mut().map(|n| h2.settings().head().insert(n))
}
Some(HttpProtocol::Unknown(ref mut settings, _, _, _)) => {
self.node.as_mut().map(|n| settings.head().insert(n))
}
None => unreachable!(),
};
Ok(Async::NotReady) => (),
Err(_) => panic!("Something is really wrong"),
}
}
let mut is_eof = false;
let kind = match self.proto {
Some(HttpProtocol::H1(ref mut h1)) => {
let result = h1.poll();
match result {
Ok(Async::Ready(())) | Err(_) => {
if let Some(n) = self.node.as_mut() {
n.remove()
};
}
_ => (),
}
return result;
}
Some(HttpProtocol::H2(ref mut h2)) => {
let result = h2.poll();
match result {
Ok(Async::Ready(())) | Err(_) => {
if let Some(n) = self.node.as_mut() {
n.remove()
};
}
_ => (),
}
return result;
}
Some(HttpProtocol::Unknown(_, _, ref mut io, ref mut buf)) => {
HttpProtocol::H1(ref mut h1) => return h1.poll(),
HttpProtocol::H2(ref mut h2) => return h2.poll(),
HttpProtocol::Unknown(_, ref mut io, ref mut buf) => {
let mut err = None;
let mut disconnect = false;
match io.read_available(buf) {
Ok(Async::Ready((read_some, stream_closed))) => {
@@ -130,17 +115,16 @@ where
disconnect = true;
}
}
Err(_) => {
disconnect = true;
Err(e) => {
err = Some(e.into());
}
_ => (),
}
if disconnect {
debug!("Ignored premature client disconnection");
if let Some(n) = self.node.as_mut() {
n.remove()
};
return Err(());
return Ok(Async::Ready(()));
} else if let Some(e) = err {
return Err(e);
}
if buf.len() >= 14 {
@@ -153,25 +137,30 @@ where
return Ok(Async::NotReady);
}
}
None => unreachable!(),
HttpProtocol::None => unreachable!(),
};
// upgrade to specific http protocol
if let Some(HttpProtocol::Unknown(settings, addr, io, buf)) = self.proto.take() {
let proto = mem::replace(&mut self.proto, HttpProtocol::None);
if let HttpProtocol::Unknown(settings, io, buf) = proto {
match kind {
ProtocolKind::Http1 => {
self.proto = Some(HttpProtocol::H1(h1::Http1::new(
settings, io, addr, buf, is_eof,
)));
self.proto = HttpProtocol::H1(h1::Http1Dispatcher::new(
settings,
io,
buf,
is_eof,
self.ka_timeout.take(),
));
return self.poll();
}
ProtocolKind::Http2 => {
self.proto = Some(HttpProtocol::H2(h2::Http2::new(
self.proto = HttpProtocol::H2(h2::Http2::new(
settings,
io,
addr,
buf.freeze(),
)));
self.ka_timeout.take(),
));
return self.poll();
}
}
@@ -180,79 +169,45 @@ where
}
}
pub(crate) struct Node<T> {
next: Option<*mut Node<T>>,
prev: Option<*mut Node<T>>,
element: *mut T,
#[doc(hidden)]
pub struct H1Channel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
proto: HttpProtocol<T, H>,
}
impl<T> Node<T> {
fn new(el: *mut T) -> Self {
Node {
next: None,
prev: None,
element: el,
}
}
fn insert<I>(&mut self, next: &mut Node<I>) {
unsafe {
let next: *mut Node<T> = next as *const _ as *mut _;
if let Some(ref mut next2) = self.next {
let n = next2.as_mut().unwrap();
n.prev = Some(next);
}
self.next = Some(next);
let next: &mut Node<T> = &mut *next;
next.prev = Some(self as *mut _);
}
}
fn remove(&mut self) {
unsafe {
self.element = ptr::null_mut();
let next = self.next.take();
let mut prev = self.prev.take();
if let Some(ref mut prev) = prev {
prev.as_mut().unwrap().next = next;
}
impl<T, H> H1Channel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
pub(crate) fn new(settings: ServiceConfig<H>, io: T) -> H1Channel<T, H> {
H1Channel {
proto: HttpProtocol::H1(h1::Http1Dispatcher::new(
settings,
io,
BytesMut::with_capacity(8192),
false,
None,
)),
}
}
}
impl Node<()> {
pub(crate) fn head() -> Self {
Node {
next: None,
prev: None,
element: ptr::null_mut(),
}
}
impl<T, H> Future for H1Channel<T, H>
where
T: IoStream,
H: HttpHandler + 'static,
{
type Item = ();
type Error = HttpDispatchError;
pub(crate) fn traverse<T, H>(&self)
where
T: IoStream,
H: HttpHandler + 'static,
{
let mut next = self.next.as_ref();
loop {
if let Some(n) = next {
unsafe {
let n: &Node<()> = &*(n.as_ref().unwrap() as *const _);
next = n.next.as_ref();
if !n.element.is_null() {
let ch: &mut HttpChannel<T, H> =
&mut *(&mut *(n.element as *mut _) as *mut () as *mut _);
ch.shutdown();
}
}
} else {
return;
}
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.proto {
HttpProtocol::H1(ref mut h1) => h1.poll(),
_ => unreachable!(),
}
}
}
@@ -290,6 +245,10 @@ where
fn set_linger(&mut self, _: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
#[inline]
fn set_keepalive(&mut self, _: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
}
impl<T> io::Read for WrapperStream<T>


@@ -1,9 +1,84 @@
use std::io;
use futures::{Async, Poll};
use http2;
use super::{helpers, HttpHandlerTask, Writer};
use http::{StatusCode, Version};
use Error;
/// Errors produced by the acceptor service.
#[derive(Debug)]
pub enum AcceptorError<T> {
/// The inner service error
Service(T),
/// Io specific error
Io(io::Error),
/// The request did not complete within the specified timeout.
Timeout,
}
#[derive(Fail, Debug)]
/// A set of errors that can occur during dispatching http requests
pub enum HttpDispatchError {
/// Application error
#[fail(display = "Application specific error: {}", _0)]
App(Error),
/// An `io::Error` that occurred while trying to read or write to a network
/// stream.
#[fail(display = "IO error: {}", _0)]
Io(io::Error),
/// The first request did not complete within the specified timeout.
#[fail(display = "The first request did not complete within the specified timeout")]
SlowRequestTimeout,
/// Shutdown timeout
#[fail(display = "Connection shutdown timeout")]
ShutdownTimeout,
/// HTTP2 error
#[fail(display = "HTTP2 error: {}", _0)]
Http2(http2::Error),
/// Payload is not consumed
#[fail(display = "Task is completed but request's payload is not consumed")]
PayloadIsNotConsumed,
/// Malformed request
#[fail(display = "Malformed request")]
MalformedRequest,
/// Internal error
#[fail(display = "Internal error")]
InternalError,
/// Unknown error
#[fail(display = "Unknown error")]
Unknown,
}
impl From<Error> for HttpDispatchError {
fn from(err: Error) -> Self {
HttpDispatchError::App(err)
}
}
impl From<io::Error> for HttpDispatchError {
fn from(err: io::Error) -> Self {
HttpDispatchError::Io(err)
}
}
impl From<http2::Error> for HttpDispatchError {
fn from(err: http2::Error) -> Self {
HttpDispatchError::Http2(err)
}
}
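With the From impls above, lower-level errors convert into HttpDispatchError via `.into()` or `?`. A minimal sketch (the function name is hypothetical; it uses the module's `std::io` import and the enum defined above):

fn read_frame() -> Result<(), HttpDispatchError> {
    let err = io::Error::new(io::ErrorKind::UnexpectedEof, "stream closed");
    // From<io::Error> selects the Io variant; `?` would apply the same conversion
    Err(HttpDispatchError::from(err))
}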
pub(crate) struct ServerError(Version, StatusCode);
impl ServerError {

File diff suppressed because it is too large.


@@ -5,7 +5,7 @@ use futures::{Async, Poll};
use httparse;
use super::message::{MessageFlags, Request};
use super::settings::WorkerSettings;
use super::settings::ServiceConfig;
use error::ParseError;
use http::header::{HeaderName, HeaderValue};
use http::{header, HttpTryFrom, Method, Uri, Version};
@@ -18,8 +18,13 @@ pub(crate) struct H1Decoder {
decoder: Option<EncodingDecoder>,
}
#[derive(Debug)]
pub(crate) enum Message {
Message { msg: Request, payload: bool },
Message {
msg: Request,
payload: bool,
expect: bool,
},
Chunk(Bytes),
Eof,
}
@@ -42,7 +47,9 @@ impl H1Decoder {
}
pub fn decode<H>(
&mut self, src: &mut BytesMut, settings: &WorkerSettings<H>,
&mut self,
src: &mut BytesMut,
settings: &ServiceConfig<H>,
) -> Result<Option<Message>, DecoderError> {
// read payload
if self.decoder.is_some() {
@@ -60,10 +67,11 @@ impl H1Decoder {
.parse_message(src, settings)
.map_err(DecoderError::Error)?
{
Async::Ready((msg, decoder)) => {
Async::Ready((msg, expect, decoder)) => {
self.decoder = decoder;
Ok(Some(Message::Message {
msg,
expect,
payload: self.decoder.is_some(),
}))
}
@@ -79,12 +87,15 @@ impl H1Decoder {
}
fn parse_message<H>(
&self, buf: &mut BytesMut, settings: &WorkerSettings<H>,
) -> Poll<(Request, Option<EncodingDecoder>), ParseError> {
&self,
buf: &mut BytesMut,
settings: &ServiceConfig<H>,
) -> Poll<(Request, bool, Option<EncodingDecoder>), ParseError> {
// Parse http message
let mut has_upgrade = false;
let mut chunked = false;
let mut content_length = None;
let mut expect_continue = false;
let msg = {
// Unsafe: we only read this data after httparse has parsed the headers into it.
@@ -152,23 +163,25 @@ impl H1Decoder {
}
// transfer-encoding
header::TRANSFER_ENCODING => {
if let Ok(s) = value.to_str() {
chunked = s.to_lowercase().contains("chunked");
if let Ok(s) = value.to_str().map(|s| s.trim()) {
chunked = s.eq_ignore_ascii_case("chunked");
} else {
return Err(ParseError::Header);
}
}
// connection keep-alive state
header::CONNECTION => {
let ka = if let Ok(conn) = value.to_str() {
let ka = if let Ok(conn) =
value.to_str().map(|conn| conn.trim())
{
if version == Version::HTTP_10
&& conn.contains("keep-alive")
&& conn.eq_ignore_ascii_case("keep-alive")
{
true
} else {
version == Version::HTTP_11 && !(conn
.contains("close")
|| conn.contains("upgrade"))
version == Version::HTTP_11
&& !(conn.eq_ignore_ascii_case("close")
|| conn.eq_ignore_ascii_case("upgrade"))
}
} else {
false
@@ -177,6 +190,18 @@ impl H1Decoder {
}
header::UPGRADE => {
has_upgrade = true;
// check content-length; some clients (dart)
// send "content-length: 0" with a websocket upgrade
if let Ok(val) = value.to_str().map(|val| val.trim()) {
if val.eq_ignore_ascii_case("websocket") {
content_length = None;
}
}
}
header::EXPECT => {
if value == "100-continue" {
expect_continue = true
}
}
_ => (),
}
@@ -208,7 +233,7 @@ impl H1Decoder {
None
};
Ok(Async::Ready((msg, decoder)))
Ok(Async::Ready((msg, expect_continue, decoder)))
}
}
@@ -220,7 +245,9 @@ pub(crate) struct HeaderIndex {
impl HeaderIndex {
pub(crate) fn record(
bytes: &[u8], headers: &[httparse::Header], indices: &mut [HeaderIndex],
bytes: &[u8],
headers: &[httparse::Header],
indices: &mut [HeaderIndex],
) {
let bytes_ptr = bytes.as_ptr() as usize;
for (header, indices) in headers.iter().zip(indices.iter_mut()) {
@@ -368,7 +395,10 @@ macro_rules! byte (
impl ChunkedState {
fn step(
&self, body: &mut BytesMut, size: &mut u64, buf: &mut Option<Bytes>,
&self,
body: &mut BytesMut,
size: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<ChunkedState, io::Error> {
use self::ChunkedState::*;
match *self {
@@ -431,7 +461,8 @@ impl ChunkedState {
}
}
fn read_size_lf(
rdr: &mut BytesMut, size: &mut u64,
rdr: &mut BytesMut,
size: &mut u64,
) -> Poll<ChunkedState, io::Error> {
match byte!(rdr) {
b'\n' if *size > 0 => Ok(Async::Ready(ChunkedState::Body)),
@@ -444,7 +475,9 @@ impl ChunkedState {
}
fn read_body(
rdr: &mut BytesMut, rem: &mut u64, buf: &mut Option<Bytes>,
rdr: &mut BytesMut,
rem: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<ChunkedState, io::Error> {
trace!("Chunked read, remaining={:?}", rem);


@@ -1,7 +1,6 @@
// #![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))]
use std::io::{self, Write};
use std::rc::Rc;
use bytes::{BufMut, BytesMut};
use futures::{Async, Poll};
@@ -9,7 +8,7 @@ use tokio_io::AsyncWrite;
use super::helpers;
use super::output::{Output, ResponseInfo, ResponseLength};
use super::settings::WorkerSettings;
use super::settings::ServiceConfig;
use super::Request;
use super::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE};
use body::{Binary, Body};
@@ -38,11 +37,11 @@ pub(crate) struct H1Writer<T: AsyncWrite, H: 'static> {
headers_size: u32,
buffer: Output,
buffer_capacity: usize,
settings: Rc<WorkerSettings<H>>,
settings: ServiceConfig<H>,
}
impl<T: AsyncWrite, H: 'static> H1Writer<T, H> {
pub fn new(stream: T, settings: Rc<WorkerSettings<H>>) -> H1Writer<T, H> {
pub fn new(stream: T, settings: ServiceConfig<H>) -> H1Writer<T, H> {
H1Writer {
flags: Flags::KEEPALIVE,
written: 0,
@@ -63,10 +62,18 @@ impl<T: AsyncWrite, H: 'static> H1Writer<T, H> {
self.flags = Flags::KEEPALIVE;
}
pub fn flushed(&mut self) -> bool {
self.buffer.is_empty()
}
pub fn disconnected(&mut self) {
self.flags.insert(Flags::DISCONNECTED);
}
pub fn upgrade(&self) -> bool {
self.flags.contains(Flags::UPGRADE)
}
pub fn keepalive(&self) -> bool {
self.flags.contains(Flags::KEEPALIVE) && !self.flags.contains(Flags::UPGRADE)
}
@@ -169,13 +176,11 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
buffer.extend_from_slice(reason);
// content length
let mut len_is_set = true;
match info.length {
ResponseLength::Chunked => {
buffer.extend_from_slice(b"\r\ntransfer-encoding: chunked\r\n")
}
ResponseLength::Zero => {
buffer.extend_from_slice(b"\r\ncontent-length: 0\r\n")
}
ResponseLength::Length(len) => {
helpers::write_content_length(len, &mut buffer)
}
@@ -184,6 +189,10 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
write!(buffer.writer(), "{}", len)?;
buffer.extend_from_slice(b"\r\n");
}
ResponseLength::Zero => {
len_is_set = false;
buffer.extend_from_slice(b"\r\n");
}
ResponseLength::None => buffer.extend_from_slice(b"\r\n"),
}
if let Some(ce) = info.content_encoding {
@@ -196,47 +205,57 @@ impl<T: AsyncWrite, H: 'static> Writer for H1Writer<T, H> {
let mut pos = 0;
let mut has_date = false;
let mut remaining = buffer.remaining_mut();
unsafe {
let mut buf = &mut *(buffer.bytes_mut() as *mut [u8]);
for (key, value) in msg.headers() {
match *key {
TRANSFER_ENCODING => continue,
CONTENT_ENCODING => if encoding != ContentEncoding::Identity {
continue;
},
CONTENT_LENGTH => match info.length {
ResponseLength::None => (),
_ => continue,
},
DATE => {
has_date = true;
let mut buf = unsafe { &mut *(buffer.bytes_mut() as *mut [u8]) };
for (key, value) in msg.headers() {
match *key {
TRANSFER_ENCODING => continue,
CONTENT_ENCODING => if encoding != ContentEncoding::Identity {
continue;
},
CONTENT_LENGTH => match info.length {
ResponseLength::None => (),
ResponseLength::Zero => {
len_is_set = true;
}
_ => (),
_ => continue,
},
DATE => {
has_date = true;
}
_ => (),
}
let v = value.as_ref();
let k = key.as_str().as_bytes();
let len = k.len() + v.len() + 4;
if len > remaining {
let v = value.as_ref();
let k = key.as_str().as_bytes();
let len = k.len() + v.len() + 4;
if len > remaining {
unsafe {
buffer.advance_mut(pos);
pos = 0;
buffer.reserve(len);
remaining = buffer.remaining_mut();
}
pos = 0;
buffer.reserve(len);
remaining = buffer.remaining_mut();
unsafe {
buf = &mut *(buffer.bytes_mut() as *mut _);
}
buf[pos..pos + k.len()].copy_from_slice(k);
pos += k.len();
buf[pos..pos + 2].copy_from_slice(b": ");
pos += 2;
buf[pos..pos + v.len()].copy_from_slice(v);
pos += v.len();
buf[pos..pos + 2].copy_from_slice(b"\r\n");
pos += 2;
remaining -= len;
}
buf[pos..pos + k.len()].copy_from_slice(k);
pos += k.len();
buf[pos..pos + 2].copy_from_slice(b": ");
pos += 2;
buf[pos..pos + v.len()].copy_from_slice(v);
pos += v.len();
buf[pos..pos + 2].copy_from_slice(b"\r\n");
pos += 2;
remaining -= len;
}
unsafe {
buffer.advance_mut(pos);
}
if !len_is_set {
buffer.extend_from_slice(b"content-length: 0\r\n")
}
// optimized date header, set_date writes \r\n
if !has_date {


@@ -2,7 +2,7 @@ use std::collections::VecDeque;
use std::io::{Read, Write};
use std::net::SocketAddr;
use std::rc::Rc;
use std::time::{Duration, Instant};
use std::time::Instant;
use std::{cmp, io, mem};
use bytes::{Buf, Bytes};
@@ -19,15 +19,16 @@ use http::{StatusCode, Version};
use payload::{Payload, PayloadStatus, PayloadWriter};
use uri::Url;
use super::error::ServerError;
use super::error::{HttpDispatchError, ServerError};
use super::h2writer::H2Writer;
use super::input::PayloadType;
use super::settings::WorkerSettings;
use super::settings::ServiceConfig;
use super::{HttpHandler, HttpHandlerTask, IoStream, Writer};
bitflags! {
struct Flags: u8 {
const DISCONNECTED = 0b0000_0010;
const DISCONNECTED = 0b0000_0001;
const SHUTDOWN = 0b0000_0010;
}
}
@@ -38,12 +39,13 @@ where
H: HttpHandler + 'static,
{
flags: Flags,
settings: Rc<WorkerSettings<H>>,
settings: ServiceConfig<H>,
addr: Option<SocketAddr>,
state: State<IoWrapper<T>>,
tasks: VecDeque<Entry<H>>,
keepalive_timer: Option<Delay>,
extensions: Option<Rc<Extensions>>,
ka_expire: Instant,
ka_timer: Option<Delay>,
}
enum State<T: AsyncRead + AsyncWrite> {
@@ -58,9 +60,23 @@ where
H: HttpHandler + 'static,
{
pub fn new(
settings: Rc<WorkerSettings<H>>, io: T, addr: Option<SocketAddr>, buf: Bytes,
settings: ServiceConfig<H>,
io: T,
buf: Bytes,
keepalive_timer: Option<Delay>,
) -> Self {
let addr = io.peer_addr();
let extensions = io.extensions();
// keep-alive timeout
let (ka_expire, ka_timer) = if let Some(delay) = keepalive_timer {
(delay.deadline(), Some(delay))
} else if let Some(delay) = settings.keep_alive_timer() {
(delay.deadline(), Some(delay))
} else {
(settings.now(), None)
};
Http2 {
flags: Flags::empty(),
tasks: VecDeque::new(),
@@ -68,39 +84,25 @@ where
unread: if buf.is_empty() { None } else { Some(buf) },
inner: io,
})),
keepalive_timer: None,
addr,
settings,
extensions,
ka_expire,
ka_timer,
}
}
pub(crate) fn shutdown(&mut self) {
self.state = State::Empty;
self.tasks.clear();
self.keepalive_timer.take();
}
pub fn poll(&mut self) -> Poll<(), HttpDispatchError> {
self.poll_keepalive()?;
pub fn settings(&self) -> &WorkerSettings<H> {
self.settings.as_ref()
}
pub fn poll(&mut self) -> Poll<(), ()> {
// server
if let State::Connection(ref mut conn) = self.state {
// keep-alive timer
if let Some(ref mut timeout) = self.keepalive_timer {
match timeout.poll() {
Ok(Async::Ready(_)) => {
trace!("Keep-alive timeout, close connection");
return Ok(Async::Ready(()));
}
Ok(Async::NotReady) => (),
Err(_) => unreachable!(),
}
}
loop {
// shutdown connection
if self.flags.contains(Flags::SHUTDOWN) {
return conn.poll_close().map_err(|e| e.into());
}
let mut not_ready = true;
let disconnected = self.flags.contains(Flags::DISCONNECTED);
@@ -215,51 +217,30 @@ where
not_ready = false;
let (parts, body) = req.into_parts();
// stop keepalive timer
self.keepalive_timer.take();
// update keep-alive expire
if self.ka_timer.is_some() {
if let Some(expire) = self.settings.keep_alive_expire() {
self.ka_expire = expire;
}
}
self.tasks.push_back(Entry::new(
parts,
body,
resp,
self.addr,
&self.settings,
self.settings.clone(),
self.extensions.clone(),
));
}
Ok(Async::NotReady) => {
// start keep-alive timer
if self.tasks.is_empty() {
if self.settings.keep_alive_enabled() {
let keep_alive = self.settings.keep_alive();
if keep_alive > 0 && self.keepalive_timer.is_none() {
trace!("Start keep-alive timer");
let mut timeout = Delay::new(
Instant::now()
+ Duration::new(keep_alive, 0),
);
// register timeout
let _ = timeout.poll();
self.keepalive_timer = Some(timeout);
}
} else {
// keep-alive disable, drop connection
return conn.poll_close().map_err(|e| {
error!("Error during connection close: {}", e)
});
}
} else {
// keep-alive unset, rely on operating system
return Ok(Async::NotReady);
}
}
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(err) => {
trace!("Connection error: {}", err);
self.flags.insert(Flags::DISCONNECTED);
self.flags.insert(Flags::SHUTDOWN);
for entry in &mut self.tasks {
entry.task.disconnected()
}
self.keepalive_timer.take();
continue;
}
}
}
@@ -267,9 +248,7 @@ where
if not_ready {
if self.tasks.is_empty() && self.flags.contains(Flags::DISCONNECTED)
{
return conn
.poll_close()
.map_err(|e| error!("Error during connection close: {}", e));
return conn.poll_close().map_err(|e| e.into());
} else {
return Ok(Async::NotReady);
}
@@ -284,7 +263,7 @@ where
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(err) => {
trace!("Error handling connection: {}", err);
return Err(());
return Err(err.into());
}
}
} else {
@@ -293,6 +272,39 @@ where
self.poll()
}
/// Poll the keep-alive timer; on expiry the connection is dropped with an error, otherwise the timer is re-armed.
fn poll_keepalive(&mut self) -> Result<(), HttpDispatchError> {
if let Some(ref mut timer) = self.ka_timer {
match timer.poll() {
Ok(Async::Ready(_)) => {
// if the timer fires during shutdown, just drop the connection
if self.flags.contains(Flags::SHUTDOWN) {
return Err(HttpDispatchError::ShutdownTimeout);
}
if timer.deadline() >= self.ka_expire {
// check for any outstanding request handling
if self.tasks.is_empty() {
return Err(HttpDispatchError::ShutdownTimeout);
} else if let Some(dl) = self.settings.keep_alive_expire() {
timer.reset(dl);
let _ = timer.poll();
}
} else {
timer.reset(self.ka_expire);
let _ = timer.poll();
}
}
Ok(Async::NotReady) => (),
Err(e) => {
error!("Timer error {:?}", e);
return Err(HttpDispatchError::Unknown);
}
}
}
Ok(())
}
}
bitflags! {
@@ -341,8 +353,11 @@ struct Entry<H: HttpHandler + 'static> {
impl<H: HttpHandler + 'static> Entry<H> {
fn new(
parts: Parts, recv: RecvStream, resp: SendResponse<Bytes>,
addr: Option<SocketAddr>, settings: &Rc<WorkerSettings<H>>,
parts: Parts,
recv: RecvStream,
resp: SendResponse<Bytes>,
addr: Option<SocketAddr>,
settings: ServiceConfig<H>,
extensions: Option<Rc<Extensions>>,
) -> Entry<H>
where
@@ -367,28 +382,20 @@ impl<H: HttpHandler + 'static> Entry<H> {
let psender = PayloadType::new(msg.headers(), psender);
// start request processing
let mut task = None;
for h in settings.handlers().iter() {
msg = match h.handle(msg) {
Ok(t) => {
task = Some(t);
break;
}
Err(msg) => msg,
}
}
let task = match settings.handler().handle(msg) {
Ok(task) => EntryPipe::Task(task),
Err(_) => EntryPipe::Error(ServerError::err(
Version::HTTP_2,
StatusCode::NOT_FOUND,
)),
};
Entry {
task: task.map(EntryPipe::Task).unwrap_or_else(|| {
EntryPipe::Error(ServerError::err(
Version::HTTP_2,
StatusCode::NOT_FOUND,
))
}),
payload: psender,
stream: H2Writer::new(resp, Rc::clone(settings)),
flags: EntryFlags::empty(),
task,
recv,
payload: psender,
stream: H2Writer::new(resp, settings),
flags: EntryFlags::empty(),
}
}


@@ -1,25 +1,27 @@
#![cfg_attr(feature = "cargo-clippy", allow(redundant_field_names))]
#![cfg_attr(
feature = "cargo-clippy",
allow(redundant_field_names)
)]
use std::{cmp, io};
use bytes::{Bytes, BytesMut};
use futures::{Async, Poll};
use http2::server::SendResponse;
use http2::{Reason, SendStream};
use modhttp::Response;
use std::rc::Rc;
use std::{cmp, io};
use http::{HttpTryFrom, Method, Version};
use super::helpers;
use super::message::Request;
use super::output::{Output, ResponseInfo, ResponseLength};
use super::settings::WorkerSettings;
use super::settings::ServiceConfig;
use super::{Writer, WriterState, MAX_WRITE_BUFFER_SIZE};
use body::{Binary, Body};
use header::ContentEncoding;
use http::header::{
HeaderValue, CONNECTION, CONTENT_ENCODING, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
};
use http::{HttpTryFrom, Method, Version};
use httpresponse::HttpResponse;
const CHUNK_SIZE: usize = 16_384;
@@ -40,13 +42,11 @@ pub(crate) struct H2Writer<H: 'static> {
written: u64,
buffer: Output,
buffer_capacity: usize,
settings: Rc<WorkerSettings<H>>,
settings: ServiceConfig<H>,
}
impl<H: 'static> H2Writer<H> {
pub fn new(
respond: SendResponse<Bytes>, settings: Rc<WorkerSettings<H>>,
) -> H2Writer<H> {
pub fn new(respond: SendResponse<Bytes>, settings: ServiceConfig<H>) -> H2Writer<H> {
H2Writer {
stream: None,
flags: Flags::empty(),
@@ -96,6 +96,7 @@ impl<H: 'static> Writer for H2Writer<H> {
let mut has_date = false;
let mut resp = Response::new(());
let mut len_is_set = false;
*resp.status_mut() = msg.status();
*resp.version_mut() = Version::HTTP_2;
for (key, value) in msg.headers().iter() {
@@ -107,6 +108,9 @@ impl<H: 'static> Writer for H2Writer<H> {
},
CONTENT_LENGTH => match info.length {
ResponseLength::None => (),
ResponseLength::Zero => {
len_is_set = true;
}
_ => continue,
},
DATE => has_date = true,
@@ -126,8 +130,10 @@ impl<H: 'static> Writer for H2Writer<H> {
// content length
match info.length {
ResponseLength::Zero => {
resp.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0"));
if !len_is_set {
resp.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0"));
}
self.flags.insert(Flags::EOF);
}
ResponseLength::Length(len) => {
@@ -144,6 +150,9 @@ impl<H: 'static> Writer for H2Writer<H> {
resp.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::try_from(l.as_str()).unwrap());
}
ResponseLength::None => {
self.flags.insert(Flags::EOF);
}
_ => (),
}
if let Some(ce) = info.content_encoding {
@@ -225,6 +234,16 @@ impl<H: 'static> Writer for H2Writer<H> {
stream.reserve_capacity(cmp::min(self.buffer.len(), CHUNK_SIZE));
}
if self.flags.contains(Flags::EOF)
&& !self.flags.contains(Flags::RESERVED)
&& self.buffer.is_empty()
{
if let Err(e) = stream.send_data(Bytes::new(), true) {
return Err(io::Error::new(io::ErrorKind::Other, e));
}
return Ok(Async::Ready(()));
}
loop {
match stream.poll_capacity() {
Ok(Async::NotReady) => return Ok(Async::NotReady),

src/server/handler.rs (new file, 208 lines)

@@ -0,0 +1,208 @@
use futures::{Async, Future, Poll};
use super::message::Request;
use super::Writer;
use error::Error;
/// Low level http request handler
#[allow(unused_variables)]
pub trait HttpHandler: 'static {
/// Request handling task
type Task: HttpHandlerTask;
/// Handle request
fn handle(&self, req: Request) -> Result<Self::Task, Request>;
}
impl HttpHandler for Box<HttpHandler<Task = Box<HttpHandlerTask>>> {
type Task = Box<HttpHandlerTask>;
fn handle(&self, req: Request) -> Result<Box<HttpHandlerTask>, Request> {
self.as_ref().handle(req)
}
}
/// Low level http request handler
pub trait HttpHandlerTask {
/// Poll task; this method is used before or after the *io* object is available
fn poll_completed(&mut self) -> Poll<(), Error> {
Ok(Async::Ready(()))
}
/// Poll task when *io* object is available
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error>;
/// Connection is disconnected
fn disconnected(&mut self) {}
}
impl HttpHandlerTask for Box<HttpHandlerTask> {
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error> {
self.as_mut().poll_io(io)
}
}
pub(super) struct HttpHandlerTaskFut<T: HttpHandlerTask> {
task: T,
}
impl<T: HttpHandlerTask> HttpHandlerTaskFut<T> {
pub(crate) fn new(task: T) -> Self {
Self { task }
}
}
impl<T: HttpHandlerTask> Future for HttpHandlerTaskFut<T> {
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<(), ()> {
self.task.poll_completed().map_err(|_| ())
}
}
/// Conversion helper trait
pub trait IntoHttpHandler {
/// The associated type which is result of conversion.
type Handler: HttpHandler;
/// Convert into `HttpHandler` object.
fn into_handler(self) -> Self::Handler;
}
impl<T: HttpHandler> IntoHttpHandler for T {
type Handler = T;
fn into_handler(self) -> Self::Handler {
self
}
}
impl<T: IntoHttpHandler> IntoHttpHandler for Vec<T> {
type Handler = VecHttpHandler<T::Handler>;
fn into_handler(self) -> Self::Handler {
VecHttpHandler(self.into_iter().map(|item| item.into_handler()).collect())
}
}
#[doc(hidden)]
pub struct VecHttpHandler<H: HttpHandler>(Vec<H>);
impl<H: HttpHandler> HttpHandler for VecHttpHandler<H> {
type Task = H::Task;
fn handle(&self, mut req: Request) -> Result<Self::Task, Request> {
for h in &self.0 {
req = match h.handle(req) {
Ok(task) => return Ok(task),
Err(e) => e,
};
}
Err(req)
}
}
macro_rules! http_handler ({$EN:ident, $(($n:tt, $T:ident)),+} => {
impl<$($T: HttpHandler,)+> HttpHandler for ($($T,)+) {
type Task = $EN<$($T,)+>;
fn handle(&self, mut req: Request) -> Result<Self::Task, Request> {
$(
req = match self.$n.handle(req) {
Ok(task) => return Ok($EN::$T(task)),
Err(e) => e,
};
)+
Err(req)
}
}
#[doc(hidden)]
pub enum $EN<$($T: HttpHandler,)+> {
$($T ($T::Task),)+
}
impl<$($T: HttpHandler,)+> HttpHandlerTask for $EN<$($T,)+>
{
fn poll_completed(&mut self) -> Poll<(), Error> {
match self {
$($EN :: $T(ref mut task) => task.poll_completed(),)+
}
}
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error> {
match self {
$($EN::$T(ref mut task) => task.poll_io(io),)+
}
}
/// Connection is disconnected
fn disconnected(&mut self) {
match self {
$($EN::$T(ref mut task) => task.disconnected(),)+
}
}
}
});
http_handler!(HttpHandlerTask1, (0, A));
http_handler!(HttpHandlerTask2, (0, A), (1, B));
http_handler!(HttpHandlerTask3, (0, A), (1, B), (2, C));
http_handler!(HttpHandlerTask4, (0, A), (1, B), (2, C), (3, D));
http_handler!(HttpHandlerTask5, (0, A), (1, B), (2, C), (3, D), (4, E));
http_handler!(
HttpHandlerTask6,
(0, A),
(1, B),
(2, C),
(3, D),
(4, E),
(5, F)
);
http_handler!(
HttpHandlerTask7,
(0, A),
(1, B),
(2, C),
(3, D),
(4, E),
(5, F),
(6, G)
);
http_handler!(
HttpHandlerTask8,
(0, A),
(1, B),
(2, C),
(3, D),
(4, E),
(5, F),
(6, G),
(7, H)
);
http_handler!(
HttpHandlerTask9,
(0, A),
(1, B),
(2, C),
(3, D),
(4, E),
(5, F),
(6, G),
(7, H),
(8, I)
);
http_handler!(
HttpHandlerTask10,
(0, A),
(1, B),
(2, C),
(3, D),
(4, E),
(5, F),
(6, G),
(7, H),
(8, I),
(9, J)
);
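
Every `HttpHandler` implementation generated above (the `Vec` impl and the tuple macro) uses the same hand-back protocol: a handler either claims the request and returns a task, or returns the request unchanged so the next handler can try it. A simplified, dependency-free sketch of that protocol follows; `Prefix` and the string "task" are illustrative, not actix-web types.

/// Illustrative request type; actix-web's `Request` is much richer.
struct Request {
    path: String,
}

/// A handler either consumes the request and produces a task,
/// or returns the request so the next handler may try it.
trait Handler {
    fn handle(&self, req: Request) -> Result<String, Request>;
}

struct Prefix(&'static str);

impl Handler for Prefix {
    fn handle(&self, req: Request) -> Result<String, Request> {
        if req.path.starts_with(self.0) {
            Ok(format!("handled by {}", self.0))
        } else {
            Err(req) // hand the request back untouched
        }
    }
}

/// Equivalent of `VecHttpHandler::handle`: try handlers in order.
fn dispatch(handlers: &[Box<dyn Handler>], mut req: Request) -> Result<String, Request> {
    for h in handlers {
        req = match h.handle(req) {
            Ok(task) => return Ok(task),
            Err(req) => req,
        };
    }
    Err(req)
}

fn main() {
    let handlers: Vec<Box<dyn Handler>> = vec![Box::new(Prefix("/api")), Box::new(Prefix("/"))];
    let task = dispatch(&handlers, Request { path: "/index.html".into() });
    assert_eq!(task.unwrap(), "handled by /");
}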


@@ -29,20 +29,24 @@ pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesM
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let four = n > 999;
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
} else {
let d1 = n << 1;
curr -= 2;
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
@@ -74,7 +78,7 @@ pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
let d1 = n << 1;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().offset(d1 as isize),
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(18),
2,
);
@@ -90,7 +94,7 @@ pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
n /= 100;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().offset(d1 as isize),
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(19),
2,
)
@@ -107,47 +111,55 @@ pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
}
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
unsafe {
let mut curr: isize = 39;
let mut buf: [u8; 41] = mem::uninitialized();
buf[39] = b'\r';
buf[40] = b'\n';
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let mut curr: isize = 39;
let mut buf: [u8; 41] = unsafe { mem::uninitialized() };
buf[39] = b'\r';
buf[40] = b'\n';
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
// eagerly decode 4 characters at a time
while n >= 10_000 {
let rem = (n % 10_000) as isize;
n /= 10_000;
// eagerly decode 4 characters at a time
while n >= 10_000 {
let rem = (n % 10_000) as isize;
n /= 10_000;
let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1;
curr -= 4;
let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1;
curr -= 4;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2);
}
}
// if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math
// if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
} else {
let d1 = n << 1;
curr -= 2;
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
unsafe {
bytes.extend_from_slice(slice::from_raw_parts(
buf_ptr.offset(curr),
41 - curr as usize,
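
Both helpers above format integers with a two-digit lookup table, peeling off up to four digits per iteration and filling the buffer back to front; the diff mostly pushes the `unsafe` blocks closer to the pointer operations. Below is a safe, self-contained sketch of the same digit-pair technique, without the raw-pointer copies the real code uses for speed.

const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";

/// Write `n` as decimal digits, filling `buf` from the end and returning
/// the index of the first digit (the same back-to-front strategy as above).
fn write_decimal(mut n: usize, buf: &mut [u8; 20]) -> usize {
    let mut curr = buf.len();
    // peel off two digits at a time via the lookup table
    while n >= 100 {
        let d = (n % 100) * 2;
        n /= 100;
        curr -= 2;
        buf[curr..curr + 2].copy_from_slice(&DEC_DIGITS_LUT[d..d + 2]);
    }
    // decode the last 1 or 2 digits
    if n >= 10 {
        let d = n * 2;
        curr -= 2;
        buf[curr..curr + 2].copy_from_slice(&DEC_DIGITS_LUT[d..d + 2]);
    } else {
        curr -= 1;
        buf[curr] = b'0' + n as u8;
    }
    curr
}

fn main() {
    let mut buf = [0u8; 20];
    let start = write_decimal(16_384, &mut buf);
    assert_eq!(&buf[start..], &b"16384"[..]);
}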


@@ -1,47 +1,48 @@
use std::marker::PhantomData;
use std::rc::Rc;
use std::sync::Arc;
use std::{io, mem, net, time};
use std::{fmt, io, mem, net};
use actix::{Actor, Addr, AsyncContext, Context, Handler, System};
use actix::{Addr, System};
use actix_net::server::Server;
use actix_net::service::NewService;
use actix_net::ssl;
use futures::{Future, Stream};
use net2::{TcpBuilder, TcpStreamExt};
use net2::TcpBuilder;
use num_cpus;
use tokio_current_thread::spawn;
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_tcp::TcpStream;
#[cfg(feature = "tls")]
use native_tls::TlsAcceptor;
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
use openssl::ssl::SslAcceptorBuilder;
#[cfg(feature = "rust-tls")]
use rustls::ServerConfig;
use super::channel::{HttpChannel, WrapperStream};
use super::server::{Connections, Server, Service, ServiceHandler};
use super::settings::{ServerSettings, WorkerSettings};
use super::worker::{Conn, Socket};
use super::{
AcceptorService, HttpHandler, IntoAsyncIo, IntoHttpHandler, IoStream, KeepAlive,
Token,
};
use super::acceptor::{AcceptorServiceFactory, DefaultAcceptor};
use super::builder::{HttpServiceBuilder, ServiceProvider};
use super::{IntoHttpHandler, KeepAlive};
struct Socket {
scheme: &'static str,
lst: net::TcpListener,
addr: net::SocketAddr,
handler: Box<ServiceProvider>,
}
/// An HTTP Server
///
/// By default it serves HTTP/2 when HTTPS is enabled;
/// to change this, use the `ServerFlags` that can be provided
/// to the acceptor service.
pub struct HttpServer<H>
pub struct HttpServer<H, F>
where
H: IntoHttpHandler + 'static,
F: Fn() -> H + Send + Clone,
{
factory: Arc<Fn() -> Vec<H> + Send + Sync>,
host: Option<String>,
keep_alive: KeepAlive,
pub(super) factory: F,
pub(super) host: Option<String>,
pub(super) keep_alive: KeepAlive,
pub(super) client_timeout: u64,
pub(super) client_shutdown: u64,
backlog: i32,
threads: usize,
exit: bool,
@@ -51,36 +52,30 @@ where
maxconn: usize,
maxconnrate: usize,
sockets: Vec<Socket>,
handlers: Vec<Box<IoStreamHandler<H::Handler, net::TcpStream>>>,
}
impl<H> HttpServer<H>
impl<H, F> HttpServer<H, F>
where
H: IntoHttpHandler + 'static,
F: Fn() -> H + Send + Clone + 'static,
{
/// Create new http server with application factory
pub fn new<F, U>(factory: F) -> Self
where
F: Fn() -> U + Sync + Send + 'static,
U: IntoIterator<Item = H> + 'static,
{
let f = move || (factory)().into_iter().collect();
pub fn new(factory: F) -> HttpServer<H, F> {
HttpServer {
factory,
threads: num_cpus::get(),
factory: Arc::new(f),
host: None,
backlog: 2048,
keep_alive: KeepAlive::Os,
keep_alive: KeepAlive::Timeout(5),
shutdown_timeout: 30,
exit: false,
no_http2: false,
no_signals: false,
maxconn: 102_400,
maxconn: 25_600,
maxconnrate: 256,
// settings: None,
client_timeout: 5000,
client_shutdown: 5000,
sockets: Vec::new(),
handlers: Vec::new(),
}
}
@@ -113,7 +108,7 @@ where
/// All socket listeners will stop accepting connections when this limit is reached
/// for each worker.
///
/// By default max connections is set to a 100k.
/// By default max connections is set to 25k.
pub fn maxconn(mut self, num: usize) -> Self {
self.maxconn = num;
self
@@ -132,12 +127,39 @@ where
/// Set server keep-alive setting.
///
/// By default keep alive is set to a `Os`.
/// By default keep alive is set to 5 seconds.
pub fn keep_alive<T: Into<KeepAlive>>(mut self, val: T) -> Self {
self.keep_alive = val.into();
self
}
/// Set server client timeout in milliseconds for first request.
///
/// Defines a timeout for reading client request header. If a client does not transmit
/// the entire set of headers within this time, the request is terminated with
/// the 408 (Request Time-out) error.
///
/// To disable timeout set value to 0.
///
/// By default client timeout is set to 5000 milliseconds.
pub fn client_timeout(mut self, val: u64) -> Self {
self.client_timeout = val;
self
}
/// Set server connection shutdown timeout in milliseconds.
///
/// Defines a timeout for connection shutdown. If the shutdown procedure does not complete
/// within this time, the request is dropped.
///
/// To disable timeout set value to 0.
///
/// By default the client shutdown timeout is set to 5000 milliseconds.
pub fn client_shutdown(mut self, val: u64) -> Self {
self.client_shutdown = val;
self
}
/// Set server host name.
///
/// Host name is used by application router as a hostname for url
@@ -175,11 +197,6 @@ where
}
/// Disable `HTTP/2` support
// #[doc(hidden)]
// #[deprecated(
// since = "0.7.4",
// note = "please use acceptor service with proper ServerFlags parama"
// )]
pub fn no_http2(mut self) -> Self {
self.no_http2 = true;
self
@@ -197,10 +214,7 @@ where
/// and the user should be presented with an enumeration of which
/// socket requires which protocol.
pub fn addrs_with_scheme(&self) -> Vec<(net::SocketAddr, &str)> {
self.handlers
.iter()
.map(|s| (s.addr(), s.scheme()))
.collect()
self.sockets.iter().map(|s| (s.addr, s.scheme)).collect()
}
/// Use listener for accepting incoming connection requests
@@ -208,11 +222,16 @@ where
/// HttpServer does not change any configuration for TcpListener,
/// it needs to be configured before passing it to listen() method.
pub fn listen(mut self, lst: net::TcpListener) -> Self {
let token = Token(self.handlers.len());
let addr = lst.local_addr().unwrap();
self.handlers
.push(Box::new(SimpleHandler::new(lst.local_addr().unwrap())));
self.sockets.push(Socket { lst, addr, token });
self.sockets.push(Socket {
lst,
addr,
scheme: "http",
handler: Box::new(HttpServiceBuilder::new(
self.factory.clone(),
DefaultAcceptor,
)),
});
self
}
@@ -221,15 +240,16 @@ where
/// Use listener for accepting incoming connection requests
pub fn listen_with<A>(mut self, lst: net::TcpListener, acceptor: A) -> Self
where
A: AcceptorService<TcpStream> + Send + 'static,
A: AcceptorServiceFactory,
<A::NewService as NewService>::InitError: fmt::Debug,
{
let token = Token(self.handlers.len());
let addr = lst.local_addr().unwrap();
self.handlers.push(Box::new(StreamHandler::new(
lst.local_addr().unwrap(),
acceptor,
)));
self.sockets.push(Socket { lst, addr, token });
self.sockets.push(Socket {
lst,
addr,
scheme: "https",
handler: Box::new(HttpServiceBuilder::new(self.factory.clone(), acceptor)),
});
self
}
@@ -240,36 +260,42 @@ where
/// HttpServer does not change any configuration for TcpListener,
/// it needs to be configured before passing it to listen() method.
pub fn listen_tls(self, lst: net::TcpListener, acceptor: TlsAcceptor) -> Self {
use super::NativeTlsAcceptor;
use actix_net::service::NewServiceExt;
self.listen_with(lst, NativeTlsAcceptor::new(acceptor))
self.listen_with(lst, move || {
ssl::NativeTlsAcceptor::new(acceptor.clone()).map_err(|_| ())
})
}
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
/// Use listener for accepting incoming tls connection requests
///
/// This method sets alpn protocols to "h2" and "http/1.1"
pub fn listen_ssl(
self, lst: net::TcpListener, builder: SslAcceptorBuilder,
) -> io::Result<Self> {
use super::{OpensslAcceptor, ServerFlags};
use super::{openssl_acceptor_with_flags, ServerFlags};
use actix_net::service::NewServiceExt;
// alpn support
let flags = if self.no_http2 {
ServerFlags::HTTP1
} else {
ServerFlags::HTTP1 | ServerFlags::HTTP2
};
Ok(self.listen_with(lst, OpensslAcceptor::with_flags(builder, flags)?))
let acceptor = openssl_acceptor_with_flags(builder, flags)?;
Ok(self.listen_with(lst, move || {
ssl::OpensslAcceptor::new(acceptor.clone()).map_err(|_| ())
}))
}
#[cfg(feature = "rust-tls")]
/// Use listener for accepting incoming tls connection requests
///
/// This method sets alpn protocols to "h2" and "http/1.1"
pub fn listen_rustls(self, lst: net::TcpListener, builder: ServerConfig) -> Self {
pub fn listen_rustls(self, lst: net::TcpListener, config: ServerConfig) -> Self {
use super::{RustlsAcceptor, ServerFlags};
use actix_net::service::NewServiceExt;
// alpn support
let flags = if self.no_http2 {
@@ -278,7 +304,9 @@ where
ServerFlags::HTTP1 | ServerFlags::HTTP2
};
self.listen_with(lst, RustlsAcceptor::with_flags(builder, flags))
self.listen_with(lst, move || {
RustlsAcceptor::with_flags(config.clone(), flags).map_err(|_| ())
})
}
/// The socket address to bind
@@ -288,11 +316,7 @@ where
let sockets = self.bind2(addr)?;
for lst in sockets {
let token = Token(self.handlers.len());
let addr = lst.local_addr().unwrap();
self.handlers
.push(Box::new(SimpleHandler::new(lst.local_addr().unwrap())));
self.sockets.push(Socket { lst, addr, token })
self = self.listen(lst);
}
Ok(self)
@@ -300,22 +324,29 @@ where
/// Start listening for incoming connections with supplied acceptor.
#[doc(hidden)]
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
#[cfg_attr(
feature = "cargo-clippy",
allow(needless_pass_by_value)
)]
pub fn bind_with<S, A>(mut self, addr: S, acceptor: A) -> io::Result<Self>
where
S: net::ToSocketAddrs,
A: AcceptorService<TcpStream> + Send + 'static,
A: AcceptorServiceFactory,
<A::NewService as NewService>::InitError: fmt::Debug,
{
let sockets = self.bind2(addr)?;
for lst in sockets {
let token = Token(self.handlers.len());
let addr = lst.local_addr().unwrap();
self.handlers.push(Box::new(StreamHandler::new(
lst.local_addr().unwrap(),
acceptor.clone(),
)));
self.sockets.push(Socket { lst, addr, token })
self.sockets.push(Socket {
lst,
addr,
scheme: "https",
handler: Box::new(HttpServiceBuilder::new(
self.factory.clone(),
acceptor.clone(),
)),
});
}
Ok(self)
@@ -358,12 +389,15 @@ where
pub fn bind_tls<S: net::ToSocketAddrs>(
self, addr: S, acceptor: TlsAcceptor,
) -> io::Result<Self> {
use super::NativeTlsAcceptor;
use actix_net::service::NewServiceExt;
use actix_net::ssl::NativeTlsAcceptor;
self.bind_with(addr, NativeTlsAcceptor::new(acceptor))
self.bind_with(addr, move || {
NativeTlsAcceptor::new(acceptor.clone()).map_err(|_| ())
})
}
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
/// Start listening for incoming tls connections.
///
/// This method sets alpn protocols to "h2" and "http/1.1"
@@ -371,16 +405,20 @@ where
where
S: net::ToSocketAddrs,
{
use super::{OpensslAcceptor, ServerFlags};
use super::{openssl_acceptor_with_flags, ServerFlags};
use actix_net::service::NewServiceExt;
// alpn support
let flags = if !self.no_http2 {
let flags = if self.no_http2 {
ServerFlags::HTTP1
} else {
ServerFlags::HTTP1 | ServerFlags::HTTP2
};
self.bind_with(addr, OpensslAcceptor::with_flags(builder, flags)?)
let acceptor = openssl_acceptor_with_flags(builder, flags)?;
self.bind_with(addr, move || {
ssl::OpensslAcceptor::new(acceptor.clone()).map_err(|_| ())
})
}
#[cfg(feature = "rust-tls")]
@@ -391,89 +429,35 @@ where
self, addr: S, builder: ServerConfig,
) -> io::Result<Self> {
use super::{RustlsAcceptor, ServerFlags};
use actix_net::service::NewServiceExt;
// alpn support
let flags = if !self.no_http2 {
let flags = if self.no_http2 {
ServerFlags::HTTP1
} else {
ServerFlags::HTTP1 | ServerFlags::HTTP2
};
self.bind_with(addr, RustlsAcceptor::with_flags(builder, flags))
}
}
impl<H: IntoHttpHandler> Into<(Box<Service>, Vec<(Token, net::TcpListener)>)>
for HttpServer<H>
{
fn into(mut self) -> (Box<Service>, Vec<(Token, net::TcpListener)>) {
let sockets: Vec<_> = mem::replace(&mut self.sockets, Vec::new())
.into_iter()
.map(|item| (item.token, item.lst))
.collect();
(
Box::new(HttpService {
factory: self.factory,
host: self.host,
keep_alive: self.keep_alive,
handlers: self.handlers,
}),
sockets,
)
}
}
struct HttpService<H: IntoHttpHandler> {
factory: Arc<Fn() -> Vec<H> + Send + Sync>,
host: Option<String>,
keep_alive: KeepAlive,
handlers: Vec<Box<IoStreamHandler<H::Handler, net::TcpStream>>>,
}
impl<H: IntoHttpHandler + 'static> Service for HttpService<H> {
fn clone(&self) -> Box<Service> {
Box::new(HttpService {
factory: self.factory.clone(),
host: self.host.clone(),
keep_alive: self.keep_alive,
handlers: self.handlers.iter().map(|v| v.clone()).collect(),
self.bind_with(addr, move || {
RustlsAcceptor::with_flags(builder.clone(), flags).map_err(|_| ())
})
}
fn create(&self, conns: Connections) -> Box<ServiceHandler> {
let addr = self.handlers[0].addr();
let s = ServerSettings::new(Some(addr), &self.host, false);
let apps: Vec<_> = (*self.factory)()
.into_iter()
.map(|h| h.into_handler())
.collect();
let handlers = self.handlers.iter().map(|h| h.clone()).collect();
Box::new(HttpServiceHandler::new(
apps,
handlers,
self.keep_alive,
s,
conns,
))
}
}
impl<H: IntoHttpHandler> HttpServer<H> {
impl<H: IntoHttpHandler, F: Fn() -> H + Send + Clone> HttpServer<H, F> {
/// Start listening for incoming connections.
///
/// This method starts a number of http workers in separate threads.
/// For each address this method starts a separate thread which does
/// `accept()` in a loop.
///
/// This methods panics if no socket addresses get bound.
///
/// This method requires to run within properly configured `Actix` system.
/// This method panics if no socket address can be bound or an `Actix` system is not yet
/// configured.
///
/// ```rust
/// extern crate actix_web;
/// use actix_web::{actix, server, App, HttpResponse};
/// extern crate actix;
/// use actix_web::{server, App, HttpResponse};
///
/// fn main() {
/// let sys = actix::System::new("example"); // <- create Actix system
@@ -486,11 +470,12 @@ impl<H: IntoHttpHandler> HttpServer<H> {
/// sys.run(); // <- Run actix system, this method starts all async processes
/// }
/// ```
pub fn start(self) -> Addr<Server> {
pub fn start(mut self) -> Addr<Server> {
ssl::max_concurrent_ssl_connect(self.maxconnrate);
let mut srv = Server::new()
.workers(self.threads)
.maxconn(self.maxconn)
.maxconnrate(self.maxconnrate)
.shutdown_timeout(self.shutdown_timeout);
srv = if self.exit { srv.system_exit() } else { srv };
@@ -500,7 +485,31 @@ impl<H: IntoHttpHandler> HttpServer<H> {
srv
};
srv.service(self).start()
let sockets = mem::replace(&mut self.sockets, Vec::new());
for socket in sockets {
let host = self
.host
.as_ref()
.map(|h| h.to_owned())
.unwrap_or_else(|| format!("{}", socket.addr));
let (secure, client_shutdown) = if socket.scheme == "https" {
(true, self.client_shutdown)
} else {
(false, 0)
};
srv = socket.handler.register(
srv,
socket.lst,
host,
socket.addr,
self.keep_alive,
secure,
self.client_timeout,
client_shutdown,
);
}
srv.start()
}
/// Spawn new thread and start listening for incoming connections.
@@ -528,277 +537,35 @@ impl<H: IntoHttpHandler> HttpServer<H> {
self.start();
sys.run();
}
}
impl<H: IntoHttpHandler> HttpServer<H> {
/// Start listening for incoming connections from a stream.
///
/// This method uses only one thread for handling incoming connections.
pub fn start_incoming<T, S>(self, stream: S, secure: bool)
where
S: Stream<Item = T, Error = io::Error> + Send + 'static,
T: AsyncRead + AsyncWrite + Send + 'static,
{
// set server settings
let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap();
let srv_settings = ServerSettings::new(Some(addr), &self.host, secure);
let apps: Vec<_> = (*self.factory)()
.into_iter()
.map(|h| h.into_handler())
.collect();
let settings = WorkerSettings::create(
apps,
self.keep_alive,
srv_settings,
Connections::default(),
);
// start server
HttpIncoming::create(move |ctx| {
ctx.add_message_stream(stream.map_err(|_| ()).map(move |t| Conn {
io: WrapperStream::new(t),
handler: Token::new(0),
token: Token::new(0),
peer: None,
}));
HttpIncoming { settings }
});
}
}
struct HttpIncoming<H: HttpHandler> {
settings: Rc<WorkerSettings<H>>,
}
impl<H> Actor for HttpIncoming<H>
where
H: HttpHandler,
{
type Context = Context<Self>;
}
impl<T, H> Handler<Conn<T>> for HttpIncoming<H>
where
T: IoStream,
H: HttpHandler,
{
type Result = ();
fn handle(&mut self, msg: Conn<T>, _: &mut Context<Self>) -> Self::Result {
spawn(HttpChannel::new(
Rc::clone(&self.settings),
msg.io,
msg.peer,
));
}
}
struct HttpServiceHandler<H>
where
H: HttpHandler + 'static,
{
settings: Rc<WorkerSettings<H>>,
handlers: Vec<Box<IoStreamHandler<H, net::TcpStream>>>,
tcp_ka: Option<time::Duration>,
}
impl<H: HttpHandler + 'static> HttpServiceHandler<H> {
fn new(
apps: Vec<H>, handlers: Vec<Box<IoStreamHandler<H, net::TcpStream>>>,
keep_alive: KeepAlive, settings: ServerSettings, conns: Connections,
) -> HttpServiceHandler<H> {
let tcp_ka = if let KeepAlive::Tcp(val) = keep_alive {
Some(time::Duration::new(val as u64, 0))
} else {
None
};
let settings = WorkerSettings::create(apps, keep_alive, settings, conns);
HttpServiceHandler {
handlers,
tcp_ka,
settings,
/// Register current http server as actix-net's server service
pub fn register(self, mut srv: Server) -> Server {
for socket in self.sockets {
let host = self
.host
.as_ref()
.map(|h| h.to_owned())
.unwrap_or_else(|| format!("{}", socket.addr));
let (secure, client_shutdown) = if socket.scheme == "https" {
(true, self.client_shutdown)
} else {
(false, 0)
};
srv = socket.handler.register(
srv,
socket.lst,
host,
socket.addr,
self.keep_alive,
secure,
self.client_timeout,
client_shutdown,
);
}
srv
}
}
impl<H> ServiceHandler for HttpServiceHandler<H>
where
H: HttpHandler + 'static,
{
fn handle(
&mut self, token: Token, io: net::TcpStream, peer: Option<net::SocketAddr>,
) {
if self.tcp_ka.is_some() && io.set_keepalive(self.tcp_ka).is_err() {
error!("Can not set socket keep-alive option");
}
self.handlers[token.0].handle(Rc::clone(&self.settings), io, peer);
}
fn shutdown(&self, force: bool) {
if force {
self.settings.head().traverse::<TcpStream, H>();
}
}
}
struct SimpleHandler<Io> {
addr: net::SocketAddr,
io: PhantomData<Io>,
}
impl<Io: IntoAsyncIo> Clone for SimpleHandler<Io> {
fn clone(&self) -> Self {
SimpleHandler {
addr: self.addr,
io: PhantomData,
}
}
}
impl<Io: IntoAsyncIo> SimpleHandler<Io> {
fn new(addr: net::SocketAddr) -> Self {
SimpleHandler {
addr,
io: PhantomData,
}
}
}
impl<H, Io> IoStreamHandler<H, Io> for SimpleHandler<Io>
where
H: HttpHandler,
Io: IntoAsyncIo + Send + 'static,
Io::Io: IoStream,
{
fn addr(&self) -> net::SocketAddr {
self.addr
}
fn clone(&self) -> Box<IoStreamHandler<H, Io>> {
Box::new(Clone::clone(self))
}
fn scheme(&self) -> &'static str {
"http"
}
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>) {
let mut io = match io.into_async_io() {
Ok(io) => io,
Err(err) => {
trace!("Failed to create async io: {}", err);
return;
}
};
let _ = io.set_nodelay(true);
spawn(HttpChannel::new(h, io, peer));
}
}
struct StreamHandler<A, Io> {
acceptor: A,
addr: net::SocketAddr,
io: PhantomData<Io>,
}
impl<Io: IntoAsyncIo, A: AcceptorService<Io::Io>> StreamHandler<A, Io> {
fn new(addr: net::SocketAddr, acceptor: A) -> Self {
StreamHandler {
addr,
acceptor,
io: PhantomData,
}
}
}
impl<Io: IntoAsyncIo, A: AcceptorService<Io::Io>> Clone for StreamHandler<A, Io> {
fn clone(&self) -> Self {
StreamHandler {
addr: self.addr,
acceptor: self.acceptor.clone(),
io: PhantomData,
}
}
}
impl<H, Io, A> IoStreamHandler<H, Io> for StreamHandler<A, Io>
where
H: HttpHandler,
Io: IntoAsyncIo + Send + 'static,
Io::Io: IoStream,
A: AcceptorService<Io::Io> + Send + 'static,
{
fn addr(&self) -> net::SocketAddr {
self.addr
}
fn clone(&self) -> Box<IoStreamHandler<H, Io>> {
Box::new(Clone::clone(self))
}
fn scheme(&self) -> &'static str {
self.acceptor.scheme()
}
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>) {
let mut io = match io.into_async_io() {
Ok(io) => io,
Err(err) => {
trace!("Failed to create async io: {}", err);
return;
}
};
let _ = io.set_nodelay(true);
let rate = h.connection_rate();
spawn(self.acceptor.accept(io).then(move |res| {
drop(rate);
match res {
Ok(io) => spawn(HttpChannel::new(h, io, peer)),
Err(err) => trace!("Can not establish connection: {}", err),
}
Ok(())
}))
}
}
impl<H, Io: 'static> IoStreamHandler<H, Io> for Box<IoStreamHandler<H, Io>>
where
H: HttpHandler,
Io: IntoAsyncIo,
{
fn addr(&self) -> net::SocketAddr {
self.as_ref().addr()
}
fn clone(&self) -> Box<IoStreamHandler<H, Io>> {
self.as_ref().clone()
}
fn scheme(&self) -> &'static str {
self.as_ref().scheme()
}
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>) {
self.as_ref().handle(h, io, peer)
}
}
trait IoStreamHandler<H, Io>: Send
where
H: HttpHandler,
{
fn clone(&self) -> Box<IoStreamHandler<H, Io>>;
fn addr(&self) -> net::SocketAddr;
fn scheme(&self) -> &'static str;
fn handle(&self, h: Rc<WorkerSettings<H>>, io: Io, peer: Option<net::SocketAddr>);
}
fn create_tcp_listener(
addr: net::SocketAddr, backlog: i32,
) -> io::Result<net::TcpListener> {
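
Taken together, the `HttpServer` changes above swap the old `Sync + Send` factory for a `Fn() -> App + Send + Clone` one, add `client_timeout`/`client_shutdown`, and change the defaults (keep-alive of 5 seconds instead of `Os`, 25 600 connections per worker instead of 100k). The following is a hedged usage sketch against this 0.7-style builder, modeled on the doc examples in the diff; the address, timeout, and limit values are arbitrary.

extern crate actix;
extern crate actix_web;

use actix_web::{server, App, HttpResponse};

fn main() {
    let sys = actix::System::new("builder-example");

    server::new(|| App::new().resource("/", |r| r.f(|_| HttpResponse::Ok())))
        .keep_alive(server::KeepAlive::Timeout(75)) // override the new 5 second default
        .client_timeout(5_000)  // 408 if the request head is not read within 5s
        .client_shutdown(5_000) // give connections 5s to shut down
        .maxconn(25_600)        // the new, lower per-worker default
        .bind("127.0.0.1:8080")
        .expect("can not bind to 127.0.0.1:8080")
        .start();

    let _ = sys.run();
}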

src/server/incoming.rs (new file, 69 lines)

@@ -0,0 +1,69 @@
//! Support for `Stream<Item = T>` where `T: AsyncRead + AsyncWrite`; deprecated.
use std::{io, net};
use actix::{Actor, Arbiter, AsyncContext, Context, Handler, Message};
use futures::{Future, Stream};
use tokio_io::{AsyncRead, AsyncWrite};
use super::channel::{HttpChannel, WrapperStream};
use super::handler::{HttpHandler, IntoHttpHandler};
use super::http::HttpServer;
use super::settings::{ServerSettings, ServiceConfig};
impl<T: AsyncRead + AsyncWrite + 'static> Message for WrapperStream<T> {
type Result = ();
}
impl<H, F> HttpServer<H, F>
where
H: IntoHttpHandler,
F: Fn() -> H + Send + Clone,
{
#[doc(hidden)]
#[deprecated(since = "0.7.8")]
/// Start listening for incoming connections from a stream.
///
/// This method uses only one thread for handling incoming connections.
pub fn start_incoming<T, S>(self, stream: S, secure: bool)
where
S: Stream<Item = T, Error = io::Error> + 'static,
T: AsyncRead + AsyncWrite + 'static,
{
// set server settings
let addr: net::SocketAddr = "127.0.0.1:8080".parse().unwrap();
let apps = (self.factory)().into_handler();
let settings = ServiceConfig::new(
apps,
self.keep_alive,
self.client_timeout,
self.client_shutdown,
ServerSettings::new(addr, "127.0.0.1:8080", secure),
);
// start server
HttpIncoming::create(move |ctx| {
ctx.add_message_stream(stream.map_err(|_| ()).map(WrapperStream::new));
HttpIncoming { settings }
});
}
}
struct HttpIncoming<H: HttpHandler> {
settings: ServiceConfig<H>,
}
impl<H: HttpHandler> Actor for HttpIncoming<H> {
type Context = Context<Self>;
}
impl<T, H> Handler<WrapperStream<T>> for HttpIncoming<H>
where
T: AsyncRead + AsyncWrite,
H: HttpHandler,
{
type Result = ();
fn handle(&mut self, msg: WrapperStream<T>, _: &mut Context<Self>) -> Self::Result {
Arbiter::spawn(HttpChannel::new(self.settings.clone(), msg).map_err(|_| ()));
}
}


@@ -1,5 +1,6 @@
use std::cell::{Cell, Ref, RefCell, RefMut};
use std::collections::VecDeque;
use std::fmt;
use std::net::SocketAddr;
use std::rc::Rc;
@@ -220,6 +221,26 @@ impl Request {
}
}
impl fmt::Debug for Request {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
writeln!(
f,
"\nRequest {:?} {}:{}",
self.version(),
self.method(),
self.path()
)?;
if let Some(q) = self.uri().query().as_ref() {
writeln!(f, " query: ?{:?}", q)?;
}
writeln!(f, " headers:")?;
for (key, val) in self.headers().iter() {
writeln!(f, " {:?}: {:?}", key, val)?;
}
Ok(())
}
}
pub(crate) struct RequestPool(
RefCell<VecDeque<Rc<InnerRequest>>>,
RefCell<ServerSettings>,


@@ -12,7 +12,7 @@
//! to serve incoming HTTP requests.
//!
//! As the server uses worker pool, the factory function is restricted to trait bounds
//! `Sync + Send + 'static` so that each worker would be able to accept Application
//! `Send + Clone + 'static` so that each worker would be able to accept Application
//! without a need for synchronization.
//!
//! If you wish to share part of state among all workers you should
@@ -29,13 +29,9 @@
//! Each TLS implementation is provided with [AcceptorService](trait.AcceptorService.html)
//! that describes how HTTP Server accepts connections.
//!
//! For `bind` and `listen` there are corresponding `bind_with` and `listen_with` that accepts
//! For `bind` and `listen` there are corresponding `bind_ssl|tls|rustls` and `listen_ssl|tls|rustls` that accept
//! these services.
//!
//! By default, acceptor would work with both HTTP2 and HTTP1 protocols.
//! But it can be controlled using [ServerFlags](struct.ServerFlags.html) which
//! can be supplied when creating `AcceptorService`.
//!
//! **NOTE:** `native-tls` doesn't support `HTTP2` yet
//!
//! ## Signal handling and shutdown
@@ -87,17 +83,13 @@
//! // load ssl keys
//! let config = load_ssl();
//!
//! // Create acceptor service for only HTTP1 protocol
//! // You can use ::new(config) to leave defaults
//! let acceptor = server::RustlsAcceptor::with_flags(config, actix_web::server::ServerFlags::HTTP1);
//!
//! // create and start server at once
//! server::new(|| {
//! App::new()
//! // register simple handler, handle all methods
//! .resource("/index.html", |r| r.f(index))
//! }))
//! }).bind_with("127.0.0.1:8080", acceptor)
//! }).bind_rustls("127.0.0.1:8443", config)
//! .unwrap()
//! .start();
//!
@@ -106,17 +98,19 @@
//! let _ = sys.run();
//!}
//! ```
use std::net::Shutdown;
use std::net::{Shutdown, SocketAddr};
use std::rc::Rc;
use std::{io, net, time};
use std::{io, time};
use bytes::{BufMut, BytesMut};
use futures::{Async, Future, Poll};
use futures::{Async, Poll};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_reactor::Handle;
use tokio_tcp::TcpStream;
pub(crate) mod accept;
pub use actix_net::server::{PauseServer, ResumeServer, StopServer};
pub(crate) mod acceptor;
pub(crate) mod builder;
mod channel;
mod error;
pub(crate) mod h1;
@@ -124,35 +118,38 @@ pub(crate) mod h1decoder;
mod h1writer;
mod h2;
mod h2writer;
mod handler;
pub(crate) mod helpers;
mod http;
pub(crate) mod incoming;
pub(crate) mod input;
pub(crate) mod message;
pub(crate) mod output;
mod server;
pub(crate) mod service;
pub(crate) mod settings;
mod ssl;
mod worker;
use actix::Message;
pub use self::message::Request;
pub use self::handler::*;
pub use self::http::HttpServer;
#[doc(hidden)]
pub use self::server::{
ConnectionRateTag, ConnectionTag, Connections, Server, Service, ServiceHandler,
};
pub use self::message::Request;
pub use self::ssl::*;
pub use self::error::{AcceptorError, HttpDispatchError};
pub use self::settings::ServerSettings;
#[doc(hidden)]
pub use self::ssl::*;
pub use self::acceptor::AcceptorTimeout;
#[doc(hidden)]
pub use self::settings::{ServiceConfig, ServiceConfigBuilder};
#[doc(hidden)]
pub use self::service::{H1Service, HttpService, StreamConfiguration};
#[doc(hidden)]
pub use self::helpers::write_content_length;
use body::Binary;
use error::Error;
use extensions::Extensions;
use header::ContentEncoding;
use httpresponse::HttpResponse;
@@ -169,7 +166,8 @@ const HW_BUFFER_SIZE: usize = 32_768;
///
/// ```rust
/// # extern crate actix_web;
/// use actix_web::{actix, server, App, HttpResponse};
/// # extern crate actix;
/// use actix_web::{server, App, HttpResponse};
///
/// fn main() {
/// let sys = actix::System::new("example"); // <- create Actix system
@@ -184,10 +182,9 @@ const HW_BUFFER_SIZE: usize = 32_768;
/// sys.run();
/// }
/// ```
pub fn new<F, U, H>(factory: F) -> HttpServer<H>
pub fn new<F, H>(factory: F) -> HttpServer<H, F>
where
F: Fn() -> U + Sync + Send + 'static,
U: IntoIterator<Item = H> + 'static,
F: Fn() -> H + Send + Clone + 'static,
H: IntoHttpHandler + 'static,
{
HttpServer::new(factory)
@@ -233,124 +230,6 @@ impl From<Option<usize>> for KeepAlive {
}
}
/// Pause accepting incoming connections
///
/// If socket contains some pending connection, they might be dropped.
/// All opened connection remains active.
#[derive(Message)]
pub struct PauseServer;
/// Resume accepting incoming connections
#[derive(Message)]
pub struct ResumeServer;
/// Stop incoming connection processing, stop all workers and exit.
///
/// If server starts with `spawn()` method, then spawned thread get terminated.
pub struct StopServer {
/// Whether to try and shut down gracefully
pub graceful: bool,
}
impl Message for StopServer {
type Result = Result<(), ()>;
}
/// Socket id token
#[doc(hidden)]
#[derive(Clone, Copy)]
pub struct Token(usize);
impl Token {
pub(crate) fn new(val: usize) -> Token {
Token(val)
}
}
/// Low level http request handler
#[allow(unused_variables)]
pub trait HttpHandler: 'static {
/// Request handling task
type Task: HttpHandlerTask;
/// Handle request
fn handle(&self, req: Request) -> Result<Self::Task, Request>;
}
impl HttpHandler for Box<HttpHandler<Task = Box<HttpHandlerTask>>> {
type Task = Box<HttpHandlerTask>;
fn handle(&self, req: Request) -> Result<Box<HttpHandlerTask>, Request> {
self.as_ref().handle(req)
}
}
/// Low level http request handler
pub trait HttpHandlerTask {
/// Poll task, this method is used before or after *io* object is available
fn poll_completed(&mut self) -> Poll<(), Error> {
Ok(Async::Ready(()))
}
/// Poll task when *io* object is available
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error>;
/// Connection is disconnected
fn disconnected(&mut self) {}
}
impl HttpHandlerTask for Box<HttpHandlerTask> {
fn poll_io(&mut self, io: &mut Writer) -> Poll<bool, Error> {
self.as_mut().poll_io(io)
}
}
/// Conversion helper trait
pub trait IntoHttpHandler {
/// The associated type which is result of conversion.
type Handler: HttpHandler;
/// Convert into `HttpHandler` object.
fn into_handler(self) -> Self::Handler;
}
impl<T: HttpHandler> IntoHttpHandler for T {
type Handler = T;
fn into_handler(self) -> Self::Handler {
self
}
}
pub(crate) trait IntoAsyncIo {
type Io: AsyncRead + AsyncWrite;
fn into_async_io(self) -> Result<Self::Io, io::Error>;
}
impl IntoAsyncIo for net::TcpStream {
type Io = TcpStream;
fn into_async_io(self) -> Result<Self::Io, io::Error> {
TcpStream::from_std(self, &Handle::default())
}
}
#[doc(hidden)]
/// Trait implemented by types that could accept incomming socket connections.
pub trait AcceptorService<Io: AsyncRead + AsyncWrite>: Clone {
/// Established connection type
type Accepted: IoStream;
/// Future describes async accept process.
type Future: Future<Item = Self::Accepted, Error = io::Error> + 'static;
/// Establish new connection
fn accept(&self, io: Io) -> Self::Future;
/// Scheme
fn scheme(&self) -> &'static str;
}
#[doc(hidden)]
#[derive(Debug)]
pub enum WriterState {
@@ -386,37 +265,49 @@ pub trait Writer {
pub trait IoStream: AsyncRead + AsyncWrite + 'static {
fn shutdown(&mut self, how: Shutdown) -> io::Result<()>;
/// Returns the socket address of the remote peer of this TCP connection.
fn peer_addr(&self) -> Option<SocketAddr> {
None
}
/// Sets the value of the TCP_NODELAY option on this socket.
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()>;
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()>;
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()>;
fn read_available(&mut self, buf: &mut BytesMut) -> Poll<(bool, bool), io::Error> {
let mut read_some = false;
loop {
if buf.remaining_mut() < LW_BUFFER_SIZE {
buf.reserve(HW_BUFFER_SIZE);
}
unsafe {
match self.read(buf.bytes_mut()) {
Ok(n) => {
if n == 0 {
return Ok(Async::Ready((read_some, true)));
} else {
read_some = true;
let read = unsafe { self.read(buf.bytes_mut()) };
match read {
Ok(n) => {
if n == 0 {
return Ok(Async::Ready((read_some, true)));
} else {
read_some = true;
unsafe {
buf.advance_mut(n);
}
}
Err(e) => {
return if e.kind() == io::ErrorKind::WouldBlock {
if read_some {
Ok(Async::Ready((read_some, false)))
} else {
Ok(Async::NotReady)
}
}
Err(e) => {
return if e.kind() == io::ErrorKind::WouldBlock {
if read_some {
Ok(Async::Ready((read_some, false)))
} else {
Err(e)
};
}
Ok(Async::NotReady)
}
} else if e.kind() == io::ErrorKind::ConnectionReset && read_some {
Ok(Async::Ready((read_some, true)))
} else {
Err(e)
};
}
}
}
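
The reworked `read_available` above adds one case to the read loop: a `ConnectionReset` that arrives after some bytes were already read is reported as a normal disconnect rather than an error, so the data that did arrive is still processed. Below is a self-contained sketch of the same decision using blocking `std::io::Read`; the `ReadOutcome` enum is an illustrative stand-in for the `(read_some, disconnected)` pair.

use std::io::{self, Read};

/// Outcome of draining a socket, mirroring `read_available`'s
/// `(read_some, disconnected)` pair.
#[derive(Debug, PartialEq)]
enum ReadOutcome {
    /// Would block; nothing (more) to read right now.
    NotReady { read_some: bool },
    /// Peer closed the connection (cleanly or via reset after data).
    Disconnected { read_some: bool },
}

fn read_available<R: Read>(io: &mut R, buf: &mut Vec<u8>) -> io::Result<ReadOutcome> {
    let mut read_some = false;
    let mut chunk = [0u8; 4096];
    loop {
        match io.read(&mut chunk) {
            Ok(0) => return Ok(ReadOutcome::Disconnected { read_some }),
            Ok(n) => {
                read_some = true;
                buf.extend_from_slice(&chunk[..n]);
            }
            Err(e) if e.kind() == io::ErrorKind::WouldBlock => {
                return Ok(ReadOutcome::NotReady { read_some })
            }
            // The new case: a reset after data was received counts as a
            // disconnect, not an error, so the received data is still used.
            Err(e) if e.kind() == io::ErrorKind::ConnectionReset && read_some => {
                return Ok(ReadOutcome::Disconnected { read_some })
            }
            Err(e) => return Err(e),
        }
    }
}

fn main() {
    // A reader that yields some bytes and then a reset.
    struct ResetAfter(Option<&'static [u8]>);
    impl Read for ResetAfter {
        fn read(&mut self, out: &mut [u8]) -> io::Result<usize> {
            match self.0.take() {
                Some(data) => {
                    out[..data.len()].copy_from_slice(data);
                    Ok(data.len())
                }
                None => Err(io::Error::new(io::ErrorKind::ConnectionReset, "reset")),
            }
        }
    }

    let mut buf = Vec::new();
    let outcome = read_available(&mut ResetAfter(Some(b"partial body")), &mut buf).unwrap();
    assert_eq!(outcome, ReadOutcome::Disconnected { read_some: true });
    assert_eq!(buf.as_slice(), &b"partial body"[..]);
}
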
@@ -444,6 +335,11 @@ impl IoStream for ::tokio_uds::UnixStream {
fn set_linger(&mut self, _dur: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
#[inline]
fn set_keepalive(&mut self, _dur: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
}
impl IoStream for TcpStream {
@@ -452,6 +348,11 @@ impl IoStream for TcpStream {
TcpStream::shutdown(self, how)
}
#[inline]
fn peer_addr(&self) -> Option<SocketAddr> {
TcpStream::peer_addr(self).ok()
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
TcpStream::set_nodelay(self, nodelay)
@@ -461,4 +362,9 @@ impl IoStream for TcpStream {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
TcpStream::set_linger(self, dur)
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
TcpStream::set_keepalive(self, dur)
}
}


@@ -11,7 +11,7 @@ use flate2::write::{GzEncoder, ZlibEncoder};
#[cfg(feature = "flate2")]
use flate2::Compression;
use http::header::{ACCEPT_ENCODING, CONTENT_LENGTH};
use http::Version;
use http::{StatusCode, Version};
use super::message::InnerRequest;
use body::{Binary, Body};
@@ -151,10 +151,9 @@ impl Output {
let version = resp.version().unwrap_or_else(|| req.version);
let mut len = 0;
#[cfg_attr(feature = "cargo-clippy", allow(match_ref_pats))]
let has_body = match resp.body() {
&Body::Empty => false,
&Body::Binary(ref bin) => {
Body::Empty => false,
Body::Binary(ref bin) => {
len = bin.len();
!(response_encoding == ContentEncoding::Auto && len < 96)
}
@@ -190,16 +189,19 @@ impl Output {
#[cfg(not(any(feature = "brotli", feature = "flate2")))]
let mut encoding = ContentEncoding::Identity;
#[cfg_attr(feature = "cargo-clippy", allow(match_ref_pats))]
let transfer = match resp.body() {
&Body::Empty => {
if !info.head {
info.length = ResponseLength::Zero;
}
Body::Empty => {
info.length = match resp.status() {
StatusCode::NO_CONTENT
| StatusCode::CONTINUE
| StatusCode::SWITCHING_PROTOCOLS
| StatusCode::PROCESSING => ResponseLength::None,
_ => ResponseLength::Zero,
};
*self = Output::Empty(buf);
return;
}
&Body::Binary(_) => {
Body::Binary(_) => {
#[cfg(any(feature = "brotli", feature = "flate2"))]
{
if !(encoding == ContentEncoding::Identity
@@ -244,7 +246,7 @@ impl Output {
}
return;
}
&Body::Streaming(_) | &Body::Actor(_) => {
Body::Streaming(_) | Body::Actor(_) => {
if resp.upgrade() {
if version == Version::HTTP_2 {
error!("Connection upgrade is forbidden for HTTP/2");
@@ -297,11 +299,10 @@ impl Output {
match resp.chunked() {
Some(true) => {
// Enable transfer encoding
info.length = ResponseLength::Chunked;
if version == Version::HTTP_2 {
info.length = ResponseLength::None;
TransferEncoding::eof(buf)
} else {
info.length = ResponseLength::Chunked;
TransferEncoding::chunked(buf)
}
}
@@ -335,15 +336,11 @@ impl Output {
}
} else {
// Enable transfer encoding
match version {
Version::HTTP_11 => {
info.length = ResponseLength::Chunked;
TransferEncoding::chunked(buf)
}
_ => {
info.length = ResponseLength::None;
TransferEncoding::eof(buf)
}
info.length = ResponseLength::Chunked;
if version == Version::HTTP_2 {
TransferEncoding::eof(buf)
} else {
TransferEncoding::chunked(buf)
}
}
}
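
Two framing rules are encoded in the output changes above: an empty body gets no `Content-Length` at all for bodyless statuses (100 Continue, 101 Switching Protocols, 102 Processing, 204 No Content) and an explicit `Content-Length: 0` otherwise, while streaming bodies use chunked transfer encoding on HTTP/1.1 but not on HTTP/2, which has its own framing. The following is a simplified decision table with illustrative enums, not the actix-web types (which also track HEAD requests and upgrades).

#[derive(Debug, PartialEq)]
enum ResponseLength {
    None,    // no Content-Length header at all
    Zero,    // explicit `Content-Length: 0`
    Chunked, // `Transfer-Encoding: chunked` (HTTP/1.1 only)
}

#[derive(Clone, Copy, PartialEq)]
enum Version { Http11, Http2 }

/// Simplified mirror of the selection above for empty and streaming bodies.
fn body_framing(status: u16, version: Version, empty_body: bool) -> ResponseLength {
    if empty_body {
        match status {
            // CONTINUE, SWITCHING_PROTOCOLS, PROCESSING, NO_CONTENT
            100 | 101 | 102 | 204 => ResponseLength::None,
            _ => ResponseLength::Zero,
        }
    } else if version == Version::Http2 {
        // HTTP/2 frames the body itself; never emit chunked encoding.
        ResponseLength::None
    } else {
        ResponseLength::Chunked
    }
}

fn main() {
    assert_eq!(body_framing(204, Version::Http11, true), ResponseLength::None);
    assert_eq!(body_framing(200, Version::Http11, true), ResponseLength::Zero);
    assert_eq!(body_framing(200, Version::Http11, false), ResponseLength::Chunked);
    assert_eq!(body_framing(200, Version::Http2, false), ResponseLength::None);
}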


@@ -1,528 +0,0 @@
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use std::time::Duration;
use std::{mem, net};
use futures::sync::{mpsc, mpsc::unbounded};
use futures::{Future, Sink, Stream};
use num_cpus;
use actix::{
fut, signal, Actor, ActorFuture, Addr, Arbiter, AsyncContext, Context, Handler,
Response, StreamHandler, System, WrapFuture,
};
use super::accept::{AcceptLoop, AcceptNotify, Command};
use super::worker::{Conn, StopWorker, Worker, WorkerClient};
use super::{PauseServer, ResumeServer, StopServer, Token};
#[doc(hidden)]
/// Describes service that could be used
/// with [Server](struct.Server.html)
pub trait Service: Send + 'static {
/// Clone service
fn clone(&self) -> Box<Service>;
/// Create service handler for this service
fn create(&self, conn: Connections) -> Box<ServiceHandler>;
}
impl Service for Box<Service> {
fn clone(&self) -> Box<Service> {
self.as_ref().clone()
}
fn create(&self, conn: Connections) -> Box<ServiceHandler> {
self.as_ref().create(conn)
}
}
#[doc(hidden)]
/// Describes the way serivce handles incoming
/// TCP connections.
pub trait ServiceHandler {
/// Handle incoming stream
fn handle(
&mut self, token: Token, io: net::TcpStream, peer: Option<net::SocketAddr>,
);
/// Shutdown open handlers
fn shutdown(&self, _: bool) {}
}
pub(crate) enum ServerCommand {
WorkerDied(usize),
}
/// Generic server
#[doc(hidden)]
pub struct Server {
threads: usize,
workers: Vec<(usize, Addr<Worker>)>,
services: Vec<Box<Service>>,
sockets: Vec<Vec<(Token, net::TcpListener)>>,
accept: AcceptLoop,
exit: bool,
shutdown_timeout: u16,
signals: Option<Addr<signal::ProcessSignals>>,
no_signals: bool,
maxconn: usize,
maxconnrate: usize,
}
impl Default for Server {
fn default() -> Self {
Self::new()
}
}
impl Server {
/// Create new Server instance
pub fn new() -> Server {
Server {
threads: num_cpus::get(),
workers: Vec::new(),
services: Vec::new(),
sockets: Vec::new(),
accept: AcceptLoop::new(),
exit: false,
shutdown_timeout: 30,
signals: None,
no_signals: false,
maxconn: 102_400,
maxconnrate: 256,
}
}
/// Set number of workers to start.
///
/// By default http server uses number of available logical cpu as threads
/// count.
pub fn workers(mut self, num: usize) -> Self {
self.threads = num;
self
}
/// Sets the maximum per-worker number of concurrent connections.
///
/// All socket listeners will stop accepting connections when this limit is reached
/// for each worker.
///
/// By default max connections is set to a 100k.
pub fn maxconn(mut self, num: usize) -> Self {
self.maxconn = num;
self
}
/// Sets the maximum per-worker concurrent connection establish process.
///
/// All listeners will stop accepting connections when this limit is reached. It
/// can be used to limit the global SSL CPU usage.
///
/// By default max connections is set to a 256.
pub fn maxconnrate(mut self, num: usize) -> Self {
self.maxconnrate = num;
self
}
/// Stop actix system.
///
/// `SystemExit` message stops currently running system.
pub fn system_exit(mut self) -> Self {
self.exit = true;
self
}
#[doc(hidden)]
/// Set alternative address for `ProcessSignals` actor.
pub fn signals(mut self, addr: Addr<signal::ProcessSignals>) -> Self {
self.signals = Some(addr);
self
}
/// Disable signal handling
pub fn disable_signals(mut self) -> Self {
self.no_signals = true;
self
}
/// Timeout for graceful workers shutdown.
///
/// After receiving a stop signal, workers have this much time to finish
/// serving requests. Workers still alive after the timeout are force
/// dropped.
///
/// By default shutdown timeout sets to 30 seconds.
pub fn shutdown_timeout(mut self, sec: u16) -> Self {
self.shutdown_timeout = sec;
self
}
/// Add new service to server
pub fn service<T>(mut self, srv: T) -> Self
where
T: Into<(Box<Service>, Vec<(Token, net::TcpListener)>)>,
{
let (srv, sockets) = srv.into();
self.services.push(srv);
self.sockets.push(sockets);
self
}
/// Spawn new thread and start listening for incoming connections.
///
/// This method spawns new thread and starts new actix system. Other than
/// that it is similar to `start()` method. This method blocks.
///
/// This methods panics if no socket addresses get bound.
///
/// ```rust,ignore
/// # extern crate futures;
/// # extern crate actix_web;
/// # use futures::Future;
/// use actix_web::*;
///
/// fn main() {
/// Server::new().
/// .service(
/// HttpServer::new(|| App::new().resource("/", |r| r.h(|_| HttpResponse::Ok())))
/// .bind("127.0.0.1:0")
/// .expect("Can not bind to 127.0.0.1:0"))
/// .run();
/// }
/// ```
pub fn run(self) {
let sys = System::new("http-server");
self.start();
sys.run();
}
/// Starts Server Actor and returns its address
pub fn start(mut self) -> Addr<Server> {
if self.sockets.is_empty() {
panic!("Service should have at least one bound socket");
} else {
info!("Starting {} http workers", self.threads);
// start workers
let mut workers = Vec::new();
for idx in 0..self.threads {
let (addr, worker) = self.start_worker(idx, self.accept.get_notify());
workers.push(worker);
self.workers.push((idx, addr));
}
// start accept thread
for sock in &self.sockets {
for s in sock.iter() {
info!("Starting server on http://{}", s.1.local_addr().unwrap());
}
}
let rx = self
.accept
.start(mem::replace(&mut self.sockets, Vec::new()), workers);
// start http server actor
let signals = self.subscribe_to_signals();
let addr = Actor::create(move |ctx| {
ctx.add_stream(rx);
self
});
if let Some(signals) = signals {
signals.do_send(signal::Subscribe(addr.clone().recipient()))
}
addr
}
}
// subscribe to os signals
fn subscribe_to_signals(&self) -> Option<Addr<signal::ProcessSignals>> {
if !self.no_signals {
if let Some(ref signals) = self.signals {
Some(signals.clone())
} else {
Some(System::current().registry().get::<signal::ProcessSignals>())
}
} else {
None
}
}
fn start_worker(
&self, idx: usize, notify: AcceptNotify,
) -> (Addr<Worker>, WorkerClient) {
let (tx, rx) = unbounded::<Conn<net::TcpStream>>();
let conns = Connections::new(notify, self.maxconn, self.maxconnrate);
let worker = WorkerClient::new(idx, tx, conns.clone());
let services: Vec<_> = self.services.iter().map(|v| v.clone()).collect();
let addr = Arbiter::start(move |ctx: &mut Context<_>| {
ctx.add_message_stream(rx);
let handlers: Vec<_> = services
.into_iter()
.map(|s| s.create(conns.clone()))
.collect();
Worker::new(conns, handlers)
});
(addr, worker)
}
}
impl Actor for Server {
type Context = Context<Self>;
}
/// Signals support
/// Handle `SIGINT`, `SIGTERM`, `SIGQUIT` signals and stop actix system
/// message to `System` actor.
impl Handler<signal::Signal> for Server {
type Result = ();
fn handle(&mut self, msg: signal::Signal, ctx: &mut Context<Self>) {
match msg.0 {
signal::SignalType::Int => {
info!("SIGINT received, exiting");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer { graceful: false }, ctx);
}
signal::SignalType::Term => {
info!("SIGTERM received, stopping");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer { graceful: true }, ctx);
}
signal::SignalType::Quit => {
info!("SIGQUIT received, exiting");
self.exit = true;
Handler::<StopServer>::handle(self, StopServer { graceful: false }, ctx);
}
_ => (),
}
}
}
impl Handler<PauseServer> for Server {
type Result = ();
fn handle(&mut self, _: PauseServer, _: &mut Context<Self>) {
self.accept.send(Command::Pause);
}
}
impl Handler<ResumeServer> for Server {
type Result = ();
fn handle(&mut self, _: ResumeServer, _: &mut Context<Self>) {
self.accept.send(Command::Resume);
}
}
impl Handler<StopServer> for Server {
type Result = Response<(), ()>;
fn handle(&mut self, msg: StopServer, ctx: &mut Context<Self>) -> Self::Result {
// stop accept thread
self.accept.send(Command::Stop);
// stop workers
let (tx, rx) = mpsc::channel(1);
let dur = if msg.graceful {
Some(Duration::new(u64::from(self.shutdown_timeout), 0))
} else {
None
};
for worker in &self.workers {
let tx2 = tx.clone();
ctx.spawn(
worker
.1
.send(StopWorker { graceful: dur })
.into_actor(self)
.then(move |_, slf, ctx| {
slf.workers.pop();
if slf.workers.is_empty() {
let _ = tx2.send(());
// we need to stop system if server was spawned
if slf.exit {
ctx.run_later(Duration::from_millis(300), |_, _| {
System::current().stop();
});
}
}
fut::ok(())
}),
);
}
if !self.workers.is_empty() {
Response::async(rx.into_future().map(|_| ()).map_err(|_| ()))
} else {
// we need to stop system if server was spawned
if self.exit {
ctx.run_later(Duration::from_millis(300), |_, _| {
System::current().stop();
});
}
Response::reply(Ok(()))
}
}
}
/// Commands from accept threads
impl StreamHandler<ServerCommand, ()> for Server {
fn finished(&mut self, _: &mut Context<Self>) {}
fn handle(&mut self, msg: ServerCommand, _: &mut Context<Self>) {
match msg {
ServerCommand::WorkerDied(idx) => {
let mut found = false;
for i in 0..self.workers.len() {
if self.workers[i].0 == idx {
self.workers.swap_remove(i);
found = true;
break;
}
}
if found {
error!("Worker has died {:?}, restarting", idx);
let mut new_idx = self.workers.len();
'found: loop {
for i in 0..self.workers.len() {
if self.workers[i].0 == new_idx {
new_idx += 1;
continue 'found;
}
}
break;
}
let (addr, worker) =
self.start_worker(new_idx, self.accept.get_notify());
self.workers.push((new_idx, addr));
self.accept.send(Command::Worker(worker));
}
}
}
}
}
#[derive(Clone, Default)]
///Contains information about connection.
pub struct Connections(Arc<ConnectionsInner>);
impl Connections {
fn new(notify: AcceptNotify, maxconn: usize, maxconnrate: usize) -> Self {
let maxconn_low = if maxconn > 10 { maxconn - 10 } else { 0 };
let maxconnrate_low = if maxconnrate > 10 {
maxconnrate - 10
} else {
0
};
Connections(Arc::new(ConnectionsInner {
notify,
maxconn,
maxconnrate,
maxconn_low,
maxconnrate_low,
conn: AtomicUsize::new(0),
connrate: AtomicUsize::new(0),
}))
}
pub(crate) fn available(&self) -> bool {
self.0.available()
}
pub(crate) fn num_connections(&self) -> usize {
self.0.conn.load(Ordering::Relaxed)
}
/// Report opened connection
pub fn connection(&self) -> ConnectionTag {
ConnectionTag::new(self.0.clone())
}
/// Report rate connection, rate is usually ssl handshake
pub fn connection_rate(&self) -> ConnectionRateTag {
ConnectionRateTag::new(self.0.clone())
}
}
#[derive(Default)]
struct ConnectionsInner {
notify: AcceptNotify,
conn: AtomicUsize,
connrate: AtomicUsize,
maxconn: usize,
maxconnrate: usize,
maxconn_low: usize,
maxconnrate_low: usize,
}
impl ConnectionsInner {
fn available(&self) -> bool {
if self.maxconnrate <= self.connrate.load(Ordering::Relaxed) {
false
} else {
self.maxconn > self.conn.load(Ordering::Relaxed)
}
}
fn notify_maxconn(&self, maxconn: usize) {
if maxconn > self.maxconn_low && maxconn <= self.maxconn {
self.notify.notify();
}
}
fn notify_maxconnrate(&self, connrate: usize) {
if connrate > self.maxconnrate_low && connrate <= self.maxconnrate {
self.notify.notify();
}
}
}
/// Type responsible for max connection stat.
///
/// Max connections stat get updated on drop.
pub struct ConnectionTag(Arc<ConnectionsInner>);
impl ConnectionTag {
fn new(inner: Arc<ConnectionsInner>) -> Self {
inner.conn.fetch_add(1, Ordering::Relaxed);
ConnectionTag(inner)
}
}
impl Drop for ConnectionTag {
fn drop(&mut self) {
let conn = self.0.conn.fetch_sub(1, Ordering::Relaxed);
self.0.notify_maxconn(conn);
}
}
/// Type responsible for max connection rate stat.
///
/// Max connections rate stat get updated on drop.
pub struct ConnectionRateTag(Arc<ConnectionsInner>);
impl ConnectionRateTag {
fn new(inner: Arc<ConnectionsInner>) -> Self {
inner.connrate.fetch_add(1, Ordering::Relaxed);
ConnectionRateTag(inner)
}
}
impl Drop for ConnectionRateTag {
fn drop(&mut self) {
let connrate = self.0.connrate.fetch_sub(1, Ordering::Relaxed);
self.0.notify_maxconnrate(connrate);
}
}
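
Although this file is deleted (its role moves to actix-net), the connection accounting it implemented is worth noting: a tag object bumps a shared counter when created and decrements it on `Drop`, so a connection is counted for exactly as long as its tag is alive. Below is a dependency-free sketch of that pattern; the rate counter and the accept-loop notification are omitted.

use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;

#[derive(Clone, Default)]
struct Connections(Arc<AtomicUsize>);

impl Connections {
    fn current(&self) -> usize {
        self.0.load(Ordering::Relaxed)
    }

    /// Count a connection for as long as the returned tag lives.
    fn connection(&self) -> ConnectionTag {
        self.0.fetch_add(1, Ordering::Relaxed);
        ConnectionTag(self.0.clone())
    }
}

/// Decrements the counter when dropped, like `ConnectionTag` above.
struct ConnectionTag(Arc<AtomicUsize>);

impl Drop for ConnectionTag {
    fn drop(&mut self) {
        self.0.fetch_sub(1, Ordering::Relaxed);
    }
}

fn main() {
    let conns = Connections::default();
    {
        let _a = conns.connection();
        let _b = conns.connection();
        assert_eq!(conns.current(), 2);
    } // both tags dropped here
    assert_eq!(conns.current(), 0);
}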

src/server/service.rs (new file, 272 lines)

@@ -0,0 +1,272 @@
use std::marker::PhantomData;
use std::time::Duration;
use actix_net::service::{NewService, Service};
use futures::future::{ok, FutureResult};
use futures::{Async, Poll};
use super::channel::{H1Channel, HttpChannel};
use super::error::HttpDispatchError;
use super::handler::HttpHandler;
use super::settings::ServiceConfig;
use super::IoStream;
/// `NewService` implementation for HTTP1/HTTP2 transports
pub struct HttpService<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
settings: ServiceConfig<H>,
_t: PhantomData<Io>,
}
impl<H, Io> HttpService<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
/// Create new `HttpService` instance.
pub fn new(settings: ServiceConfig<H>) -> Self {
HttpService {
settings,
_t: PhantomData,
}
}
}
impl<H, Io> NewService for HttpService<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
type Request = Io;
type Response = ();
type Error = HttpDispatchError;
type InitError = ();
type Service = HttpServiceHandler<H, Io>;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
ok(HttpServiceHandler::new(self.settings.clone()))
}
}
pub struct HttpServiceHandler<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
settings: ServiceConfig<H>,
_t: PhantomData<Io>,
}
impl<H, Io> HttpServiceHandler<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
fn new(settings: ServiceConfig<H>) -> HttpServiceHandler<H, Io> {
HttpServiceHandler {
settings,
_t: PhantomData,
}
}
}
impl<H, Io> Service for HttpServiceHandler<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
type Request = Io;
type Response = ();
type Error = HttpDispatchError;
type Future = HttpChannel<Io, H>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: Self::Request) -> Self::Future {
HttpChannel::new(self.settings.clone(), req)
}
}
/// `NewService` implementation for HTTP1 transport
pub struct H1Service<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
settings: ServiceConfig<H>,
_t: PhantomData<Io>,
}
impl<H, Io> H1Service<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
/// Create new `HttpService` instance.
pub fn new(settings: ServiceConfig<H>) -> Self {
H1Service {
settings,
_t: PhantomData,
}
}
}
impl<H, Io> NewService for H1Service<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
type Request = Io;
type Response = ();
type Error = HttpDispatchError;
type InitError = ();
type Service = H1ServiceHandler<H, Io>;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
ok(H1ServiceHandler::new(self.settings.clone()))
}
}
/// `Service` implementation for HTTP1 transport
pub struct H1ServiceHandler<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
settings: ServiceConfig<H>,
_t: PhantomData<Io>,
}
impl<H, Io> H1ServiceHandler<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
fn new(settings: ServiceConfig<H>) -> H1ServiceHandler<H, Io> {
H1ServiceHandler {
settings,
_t: PhantomData,
}
}
}
impl<H, Io> Service for H1ServiceHandler<H, Io>
where
H: HttpHandler,
Io: IoStream,
{
type Request = Io;
type Response = ();
type Error = HttpDispatchError;
type Future = H1Channel<Io, H>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: Self::Request) -> Self::Future {
H1Channel::new(self.settings.clone(), req)
}
}
/// `NewService` implementation for stream configuration service
///
/// Stream configuration service allows changing some socket-level
/// parameters, for example `tcp nodelay` or `tcp keep-alive`.
pub struct StreamConfiguration<T, E> {
no_delay: Option<bool>,
tcp_ka: Option<Option<Duration>>,
_t: PhantomData<(T, E)>,
}
impl<T, E> Default for StreamConfiguration<T, E> {
fn default() -> Self {
Self::new()
}
}
impl<T, E> StreamConfiguration<T, E> {
/// Create new `StreamConfigurationService` instance.
pub fn new() -> Self {
Self {
no_delay: None,
tcp_ka: None,
_t: PhantomData,
}
}
/// Sets the value of the `TCP_NODELAY` option on this socket.
pub fn nodelay(mut self, nodelay: bool) -> Self {
self.no_delay = Some(nodelay);
self
}
/// Sets whether keepalive messages are enabled to be sent on this socket.
pub fn tcp_keepalive(mut self, keepalive: Option<Duration>) -> Self {
self.tcp_ka = Some(keepalive);
self
}
}
impl<T: IoStream, E> NewService for StreamConfiguration<T, E> {
type Request = T;
type Response = T;
type Error = E;
type InitError = ();
type Service = StreamConfigurationService<T, E>;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self) -> Self::Future {
ok(StreamConfigurationService {
no_delay: self.no_delay,
tcp_ka: self.tcp_ka,
_t: PhantomData,
})
}
}
/// Stream configuration service
///
/// Stream configuration service allows changing some socket-level
/// parameters, for example `tcp nodelay` or `tcp keep-alive`.
pub struct StreamConfigurationService<T, E> {
no_delay: Option<bool>,
tcp_ka: Option<Option<Duration>>,
_t: PhantomData<(T, E)>,
}
impl<T, E> Service for StreamConfigurationService<T, E>
where
T: IoStream,
{
type Request = T;
type Response = T;
type Error = E;
type Future = FutureResult<T, E>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, mut req: Self::Request) -> Self::Future {
if let Some(no_delay) = self.no_delay {
if req.set_nodelay(no_delay).is_err() {
error!("Can not set socket no-delay option");
}
}
if let Some(keepalive) = self.tcp_ka {
if req.set_keepalive(keepalive).is_err() {
error!("Can not set socket keep-alive option");
}
}
ok(req)
}
}
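In practice the configuration stage sits in front of the HTTP service inside the same server factory. A hedged sketch of the factory body only, assuming `settings` is a `ServiceConfig` built as in the sketch above and that actix-net's `NewServiceExt::and_then` is in scope (this mirrors the `test_custom_pipeline` test near the end of this diff):
use std::time::Duration;
use actix_net::service::NewServiceExt;
use actix_web::server::{HttpService, StreamConfiguration};
// Replaces the bare `HttpService::new(settings)` in the factory above:
// tune the accepted socket first, then hand it to the HTTP service.
StreamConfiguration::new()
    .nodelay(true)                                // set TCP_NODELAY
    .tcp_keepalive(Some(Duration::from_secs(10))) // enable TCP keep-alive
    .and_then(HttpService::new(settings))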

View File

@@ -1,23 +1,21 @@
use std::cell::{RefCell, RefMut, UnsafeCell};
use std::cell::{Cell, RefCell};
use std::collections::VecDeque;
use std::fmt::Write;
use std::rc::Rc;
use std::time::{Duration, Instant};
use std::{env, fmt, net};
use actix::Arbiter;
use bytes::BytesMut;
use futures::Stream;
use futures::{future, Future};
use futures_cpupool::CpuPool;
use http::StatusCode;
use lazycell::LazyCell;
use parking_lot::Mutex;
use time;
use tokio_timer::Interval;
use tokio_current_thread::spawn;
use tokio_timer::{sleep, Delay};
use super::channel::Node;
use super::message::{Request, RequestPool};
use super::server::{ConnectionRateTag, ConnectionTag, Connections};
use super::KeepAlive;
use body::Body;
use httpresponse::{HttpResponse, HttpResponseBuilder, HttpResponsePool};
@@ -44,7 +42,7 @@ lazy_static! {
/// Various server settings
pub struct ServerSettings {
addr: Option<net::SocketAddr>,
addr: net::SocketAddr,
secure: bool,
host: String,
cpu_pool: LazyCell<CpuPool>,
@@ -66,7 +64,7 @@ impl Clone for ServerSettings {
impl Default for ServerSettings {
fn default() -> Self {
ServerSettings {
addr: None,
addr: "127.0.0.1:8080".parse().unwrap(),
secure: false,
host: "localhost:8080".to_owned(),
responses: HttpResponsePool::get_pool(),
@@ -78,15 +76,9 @@ impl Default for ServerSettings {
impl ServerSettings {
/// Create server settings instance
pub(crate) fn new(
addr: Option<net::SocketAddr>, host: &Option<String>, secure: bool,
addr: net::SocketAddr, host: &str, secure: bool,
) -> ServerSettings {
let host = if let Some(ref host) = *host {
host.clone()
} else if let Some(ref addr) = addr {
format!("{}", addr)
} else {
"localhost".to_owned()
};
let host = host.to_owned();
let cpu_pool = LazyCell::new();
let responses = HttpResponsePool::get_pool();
ServerSettings {
@@ -99,7 +91,7 @@ impl ServerSettings {
}
/// Returns the socket address of the local half of this TCP connection
pub fn local_addr(&self) -> Option<net::SocketAddr> {
pub fn local_addr(&self) -> net::SocketAddr {
self.addr
}
@@ -134,119 +126,294 @@ impl ServerSettings {
// "Sun, 06 Nov 1994 08:49:37 GMT".len()
const DATE_VALUE_LENGTH: usize = 29;
pub(crate) struct WorkerSettings<H> {
h: Vec<H>,
keep_alive: u64,
/// Http service configuration
pub struct ServiceConfig<H>(Rc<Inner<H>>);
struct Inner<H> {
handler: H,
keep_alive: Option<Duration>,
client_timeout: u64,
client_shutdown: u64,
ka_enabled: bool,
bytes: Rc<SharedBytesPool>,
messages: &'static RequestPool,
conns: Connections,
node: RefCell<Node<()>>,
date: UnsafeCell<Date>,
date: Cell<Option<Date>>,
}
impl<H: 'static> WorkerSettings<H> {
pub(crate) fn create(
apps: Vec<H>, keep_alive: KeepAlive, settings: ServerSettings,
conns: Connections,
) -> Rc<WorkerSettings<H>> {
let settings = Rc::new(Self::new(apps, keep_alive, settings, conns));
// periodic date update
let s = settings.clone();
Arbiter::spawn(
Interval::new(Instant::now(), Duration::from_secs(1))
.map_err(|_| ())
.and_then(move |_| {
s.update_date();
Ok(())
}).fold((), |(), _| Ok(())),
);
settings
impl<H> Clone for ServiceConfig<H> {
fn clone(&self) -> Self {
ServiceConfig(self.0.clone())
}
}
impl<H> WorkerSettings<H> {
impl<H> ServiceConfig<H> {
/// Create instance of `ServiceConfig`
pub(crate) fn new(
h: Vec<H>, keep_alive: KeepAlive, settings: ServerSettings, conns: Connections,
) -> WorkerSettings<H> {
handler: H, keep_alive: KeepAlive, client_timeout: u64, client_shutdown: u64,
settings: ServerSettings,
) -> ServiceConfig<H> {
let (keep_alive, ka_enabled) = match keep_alive {
KeepAlive::Timeout(val) => (val as u64, true),
KeepAlive::Os | KeepAlive::Tcp(_) => (0, true),
KeepAlive::Disabled => (0, false),
};
let keep_alive = if ka_enabled && keep_alive > 0 {
Some(Duration::from_secs(keep_alive))
} else {
None
};
WorkerSettings {
h,
bytes: Rc::new(SharedBytesPool::new()),
messages: RequestPool::pool(settings),
node: RefCell::new(Node::head()),
date: UnsafeCell::new(Date::new()),
ServiceConfig(Rc::new(Inner {
handler,
keep_alive,
ka_enabled,
conns,
}
client_timeout,
client_shutdown,
bytes: Rc::new(SharedBytesPool::new()),
messages: RequestPool::pool(settings),
date: Cell::new(None),
}))
}
pub fn head(&self) -> RefMut<Node<()>> {
self.node.borrow_mut()
/// Create a service configuration builder.
pub fn build(handler: H) -> ServiceConfigBuilder<H> {
ServiceConfigBuilder::new(handler)
}
pub fn handlers(&self) -> &Vec<H> {
&self.h
pub(crate) fn handler(&self) -> &H {
&self.0.handler
}
pub fn keep_alive(&self) -> u64 {
self.keep_alive
#[inline]
/// Keep alive duration if configured.
pub fn keep_alive(&self) -> Option<Duration> {
self.0.keep_alive
}
#[inline]
/// Return state of connection keep-alive functionality
pub fn keep_alive_enabled(&self) -> bool {
self.ka_enabled
self.0.ka_enabled
}
pub fn get_bytes(&self) -> BytesMut {
self.bytes.get_bytes()
pub(crate) fn get_bytes(&self) -> BytesMut {
self.0.bytes.get_bytes()
}
pub fn release_bytes(&self, bytes: BytesMut) {
self.bytes.release_bytes(bytes)
pub(crate) fn release_bytes(&self, bytes: BytesMut) {
self.0.bytes.release_bytes(bytes)
}
pub fn get_request(&self) -> Request {
RequestPool::get(self.messages)
}
pub fn connection(&self) -> ConnectionTag {
self.conns.connection()
}
fn update_date(&self) {
// Unsafe: WorkerSetting is !Sync and !Send
unsafe { &mut *self.date.get() }.update();
}
pub fn set_date(&self, dst: &mut BytesMut, full: bool) {
// Unsafe: WorkerSetting is !Sync and !Send
let date_bytes = unsafe { &(*self.date.get()).bytes };
if full {
let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
buf[6..35].copy_from_slice(date_bytes);
buf[35..].copy_from_slice(b"\r\n\r\n");
dst.extend_from_slice(&buf);
} else {
dst.extend_from_slice(date_bytes);
}
}
#[allow(dead_code)]
pub(crate) fn connection_rate(&self) -> ConnectionRateTag {
self.conns.connection_rate()
pub(crate) fn get_request(&self) -> Request {
RequestPool::get(self.0.messages)
}
}
impl<H: 'static> ServiceConfig<H> {
#[inline]
/// Client timeout for first request.
pub fn client_timer(&self) -> Option<Delay> {
let delay = self.0.client_timeout;
if delay != 0 {
Some(Delay::new(self.now() + Duration::from_millis(delay)))
} else {
None
}
}
/// Client timeout for first request.
pub fn client_timer_expire(&self) -> Option<Instant> {
let delay = self.0.client_timeout;
if delay != 0 {
Some(self.now() + Duration::from_millis(delay))
} else {
None
}
}
/// Client shutdown timer
pub fn client_shutdown_timer(&self) -> Option<Instant> {
let delay = self.0.client_shutdown;
if delay != 0 {
Some(self.now() + Duration::from_millis(delay))
} else {
None
}
}
#[inline]
/// Return keep-alive timer delay if configured.
pub fn keep_alive_timer(&self) -> Option<Delay> {
if let Some(ka) = self.0.keep_alive {
Some(Delay::new(self.now() + ka))
} else {
None
}
}
/// Keep-alive expire time
pub fn keep_alive_expire(&self) -> Option<Instant> {
if let Some(ka) = self.0.keep_alive {
Some(self.now() + ka)
} else {
None
}
}
fn check_date(&self) {
if unsafe { &*self.0.date.as_ptr() }.is_none() {
self.0.date.set(Some(Date::new()));
// periodic date update
let s = self.clone();
spawn(sleep(Duration::from_millis(500)).then(move |_| {
s.0.date.set(None);
future::ok(())
}));
}
}
pub(crate) fn set_date(&self, dst: &mut BytesMut, full: bool) {
self.check_date();
let date = &unsafe { &*self.0.date.as_ptr() }.as_ref().unwrap().bytes;
if full {
let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
buf[6..35].copy_from_slice(date);
buf[35..].copy_from_slice(b"\r\n\r\n");
dst.extend_from_slice(&buf);
} else {
dst.extend_from_slice(date);
}
}
#[inline]
pub(crate) fn now(&self) -> Instant {
self.check_date();
unsafe { &*self.0.date.as_ptr() }.as_ref().unwrap().current
}
}
/// A service config builder
///
/// This type can be used to construct an instance of `ServiceConfig` through a
/// builder-like pattern.
pub struct ServiceConfigBuilder<H> {
handler: H,
keep_alive: KeepAlive,
client_timeout: u64,
client_shutdown: u64,
host: String,
addr: net::SocketAddr,
secure: bool,
}
impl<H> ServiceConfigBuilder<H> {
/// Create instance of `ServiceConfigBuilder`
pub fn new(handler: H) -> ServiceConfigBuilder<H> {
ServiceConfigBuilder {
handler,
keep_alive: KeepAlive::Timeout(5),
client_timeout: 5000,
client_shutdown: 5000,
secure: false,
host: "localhost".to_owned(),
addr: "127.0.0.1:8080".parse().unwrap(),
}
}
/// Enable secure flag for current server.
///
/// By default this flag is set to false.
pub fn secure(mut self) -> Self {
self.secure = true;
self
}
/// Set server keep-alive setting.
///
/// By default keep-alive is set to 5 seconds.
pub fn keep_alive<T: Into<KeepAlive>>(mut self, val: T) -> Self {
self.keep_alive = val.into();
self
}
/// Set server client timeout in milliseconds for first request.
///
/// Defines a timeout for reading client request header. If a client does not transmit
/// the entire set of headers within this time, the request is terminated with
/// the 408 (Request Time-out) error.
///
/// To disable timeout set value to 0.
///
/// By default client timeout is set to 5000 milliseconds.
pub fn client_timeout(mut self, val: u64) -> Self {
self.client_timeout = val;
self
}
/// Set server connection shutdown timeout in milliseconds.
///
/// Defines a timeout for connection shutdown. If the shutdown procedure does not complete
/// within this time, the connection is dropped. This timeout affects only secure connections.
///
/// To disable timeout set value to 0.
///
/// By default the client shutdown timeout is set to 5000 milliseconds.
pub fn client_shutdown(mut self, val: u64) -> Self {
self.client_shutdown = val;
self
}
/// Set server host name.
///
/// Host name is used by the application router as a hostname for url
/// generation. Check [ConnectionInfo](./dev/struct.ConnectionInfo.html#method.host)
/// documentation for more information.
///
/// By default host name is set to a "localhost" value.
pub fn server_hostname(mut self, val: &str) -> Self {
self.host = val.to_owned();
self
}
/// Set server ip address.
///
/// The address is stored in `ServerSettings` and reported back by its
/// `local_addr()` method; it is not used to bind a listener.
///
/// By default the server address is set to "127.0.0.1:8080".
pub fn server_address<S: net::ToSocketAddrs>(mut self, addr: S) -> Self {
match addr.to_socket_addrs() {
Err(err) => error!("Can not convert to SocketAddr: {}", err),
Ok(mut addrs) => if let Some(addr) = addrs.next() {
self.addr = addr;
},
}
self
}
/// Finish service configuration and create `ServiceConfig` object.
pub fn finish(self) -> ServiceConfig<H> {
let settings = ServerSettings::new(self.addr, &self.host, self.secure);
let client_shutdown = if self.secure { self.client_shutdown } else { 0 };
ServiceConfig::new(
self.handler,
self.keep_alive,
self.client_timeout,
client_shutdown,
settings,
)
}
}
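As a usage note, a hedged sketch of the full builder chain (here `app` stands for any finished actix-web application, i.e. an `HttpHandler`; the host and address values are placeholders):
let settings = ServiceConfig::build(app)
    .keep_alive(KeepAlive::Timeout(5)) // per-connection keep-alive, in seconds
    .client_timeout(1000)              // 1s to read the request head, else 408
    .secure()                          // mark connections as https
    .client_shutdown(1000)             // 1s TLS shutdown budget (secure only)
    .server_hostname("example.com")    // used for url generation
    .server_address("0.0.0.0:8443")    // reported via ServerSettings::local_addr()
    .finish();
// `settings` is what HttpService::new() / H1Service::new() consume, as in the
// tests at the end of this diff.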
#[derive(Copy, Clone)]
struct Date {
current: Instant,
bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
@@ -254,6 +421,7 @@ struct Date {
impl Date {
fn new() -> Date {
let mut date = Date {
current: Instant::now(),
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
@@ -262,6 +430,7 @@ impl Date {
}
fn update(&mut self) {
self.pos = 0;
self.current = Instant::now();
write!(self, "{}", time::at_utc(time::get_time()).rfc822()).unwrap();
}
}
@@ -303,6 +472,8 @@ impl SharedBytesPool {
#[cfg(test)]
mod tests {
use super::*;
use futures::future;
use tokio::runtime::current_thread;
#[test]
fn test_date_len() {
@@ -311,16 +482,22 @@ mod tests {
#[test]
fn test_date() {
let settings = WorkerSettings::<()>::new(
Vec::new(),
KeepAlive::Os,
ServerSettings::default(),
Connections::default(),
);
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf1, true);
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf2, true);
assert_eq!(buf1, buf2);
let mut rt = current_thread::Runtime::new().unwrap();
let _ = rt.block_on(future::lazy(|| {
let settings = ServiceConfig::<()>::new(
(),
KeepAlive::Os,
0,
0,
ServerSettings::default(),
);
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf1, true);
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf2, true);
assert_eq!(buf1, buf2);
future::ok::<_, ()>(())
}));
}
}

View File

@@ -1,12 +1,10 @@
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
mod openssl;
#[cfg(feature = "alpn")]
pub use self::openssl::OpensslAcceptor;
#[cfg(any(feature = "alpn", feature = "ssl"))]
pub use self::openssl::{openssl_acceptor_with_flags, OpensslAcceptor};
#[cfg(feature = "tls")]
mod nativetls;
#[cfg(feature = "tls")]
pub use self::nativetls::{NativeTlsAcceptor, TlsStream};
#[cfg(feature = "rust-tls")]
mod rustls;

View File

@@ -1,61 +1,9 @@
use std::net::Shutdown;
use std::net::{Shutdown, SocketAddr};
use std::{io, time};
use futures::{Async, Future, Poll};
use native_tls::{self, HandshakeError, TlsAcceptor};
use tokio_io::{AsyncRead, AsyncWrite};
use actix_net::ssl::TlsStream;
use server::{AcceptorService, IoStream};
#[derive(Clone)]
/// Support `SSL` connections via native-tls package
///
/// `tls` feature enables `NativeTlsAcceptor` type
pub struct NativeTlsAcceptor {
acceptor: TlsAcceptor,
}
/// A wrapper around an underlying raw stream which implements the TLS or SSL
/// protocol.
///
/// A `TlsStream<S>` represents a handshake that has been completed successfully
/// and both the server and the client are ready for receiving and sending
/// data. Bytes read from a `TlsStream` are decrypted from `S` and bytes written
/// to a `TlsStream` are encrypted when passing through to `S`.
#[derive(Debug)]
pub struct TlsStream<S> {
inner: native_tls::TlsStream<S>,
}
/// Future returned from `NativeTlsAcceptor::accept` which will resolve
/// once the accept handshake has finished.
pub struct Accept<S> {
inner: Option<Result<native_tls::TlsStream<S>, HandshakeError<S>>>,
}
impl NativeTlsAcceptor {
/// Create `NativeTlsAcceptor` instance
pub fn new(acceptor: TlsAcceptor) -> Self {
NativeTlsAcceptor {
acceptor: acceptor.into(),
}
}
}
impl<Io: IoStream> AcceptorService<Io> for NativeTlsAcceptor {
type Accepted = TlsStream<Io>;
type Future = Accept<Io>;
fn scheme(&self) -> &'static str {
"https"
}
fn accept(&self, io: Io) -> Self::Future {
Accept {
inner: Some(self.acceptor.accept(io)),
}
}
}
use server::IoStream;
impl<Io: IoStream> IoStream for TlsStream<Io> {
#[inline]
@@ -64,6 +12,11 @@ impl<Io: IoStream> IoStream for TlsStream<Io> {
Ok(())
}
#[inline]
fn peer_addr(&self) -> Option<SocketAddr> {
self.get_ref().get_ref().peer_addr()
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().get_mut().set_nodelay(nodelay)
@@ -73,71 +26,9 @@ impl<Io: IoStream> IoStream for TlsStream<Io> {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur)
}
}
impl<Io: IoStream> Future for Accept<Io> {
type Item = TlsStream<Io>;
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.inner.take().expect("cannot poll MidHandshake twice") {
Ok(stream) => Ok(TlsStream { inner: stream }.into()),
Err(HandshakeError::Failure(e)) => {
Err(io::Error::new(io::ErrorKind::Other, e))
}
Err(HandshakeError::WouldBlock(s)) => match s.handshake() {
Ok(stream) => Ok(TlsStream { inner: stream }.into()),
Err(HandshakeError::Failure(e)) => {
Err(io::Error::new(io::ErrorKind::Other, e))
}
Err(HandshakeError::WouldBlock(s)) => {
self.inner = Some(Err(HandshakeError::WouldBlock(s)));
Ok(Async::NotReady)
}
},
}
}
}
impl<S> TlsStream<S> {
/// Get access to the internal `native_tls::TlsStream` stream which also
/// transitively allows access to `S`.
pub fn get_ref(&self) -> &native_tls::TlsStream<S> {
&self.inner
}
/// Get mutable access to the internal `native_tls::TlsStream` stream which
/// also transitively allows mutable access to `S`.
pub fn get_mut(&mut self) -> &mut native_tls::TlsStream<S> {
&mut self.inner
}
}
impl<S: io::Read + io::Write> io::Read for TlsStream<S> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
}
impl<S: io::Read + io::Write> io::Write for TlsStream<S> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.inner.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl<S: AsyncRead + AsyncWrite> AsyncRead for TlsStream<S> {}
impl<S: AsyncRead + AsyncWrite> AsyncWrite for TlsStream<S> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
match self.inner.shutdown() {
Ok(_) => (),
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => (),
Err(e) => return Err(e),
}
self.inner.get_mut().shutdown()
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_keepalive(dur)
}
}

View File

@@ -1,80 +1,63 @@
use std::net::Shutdown;
use std::net::{Shutdown, SocketAddr};
use std::rc::Rc;
use std::{io, time};
use futures::{Future, Poll};
use actix_net::ssl;
use openssl::ssl::{AlpnError, SslAcceptor, SslAcceptorBuilder};
use tokio_openssl::{AcceptAsync, SslAcceptorExt, SslStream};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_openssl::SslStream;
use server::{AcceptorService, IoStream, ServerFlags};
use extensions::Extensions;
use server::{IoStream, ServerFlags};
#[derive(Clone)]
/// Support `SSL` connections via openssl package
///
/// `alpn` feature enables `OpensslAcceptor` type
pub struct OpensslAcceptor {
acceptor: SslAcceptor,
/// `ssl` feature enables `OpensslAcceptor` type
pub struct OpensslAcceptor<T> {
_t: ssl::OpensslAcceptor<T>,
}
impl OpensslAcceptor {
impl<T: AsyncRead + AsyncWrite> OpensslAcceptor<T> {
/// Create `OpensslAcceptor` with enabled `HTTP/2` and `HTTP1.1` support.
pub fn new(builder: SslAcceptorBuilder) -> io::Result<Self> {
pub fn new(builder: SslAcceptorBuilder) -> io::Result<ssl::OpensslAcceptor<T>> {
OpensslAcceptor::with_flags(builder, ServerFlags::HTTP1 | ServerFlags::HTTP2)
}
/// Create `OpensslAcceptor` with custom server flags.
pub fn with_flags(
mut builder: SslAcceptorBuilder, flags: ServerFlags,
) -> io::Result<Self> {
let mut protos = Vec::new();
if flags.contains(ServerFlags::HTTP1) {
protos.extend(b"\x08http/1.1");
}
if flags.contains(ServerFlags::HTTP2) {
protos.extend(b"\x02h2");
builder.set_alpn_select_callback(|_, protos| {
const H2: &[u8] = b"\x02h2";
if protos.windows(3).any(|window| window == H2) {
Ok(b"h2")
} else {
Err(AlpnError::NOACK)
}
});
}
builder: SslAcceptorBuilder, flags: ServerFlags,
) -> io::Result<ssl::OpensslAcceptor<T>> {
let acceptor = openssl_acceptor_with_flags(builder, flags)?;
if !protos.is_empty() {
builder.set_alpn_protos(&protos)?;
}
Ok(OpensslAcceptor {
acceptor: builder.build(),
})
Ok(ssl::OpensslAcceptor::new(acceptor))
}
}
pub struct AcceptorFut<Io>(AcceptAsync<Io>);
impl<Io: IoStream> Future for AcceptorFut<Io> {
type Item = SslStream<Io>;
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
self.0
.poll()
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
/// Configure `SslAcceptorBuilder` with custom server flags.
pub fn openssl_acceptor_with_flags(
mut builder: SslAcceptorBuilder, flags: ServerFlags,
) -> io::Result<SslAcceptor> {
let mut protos = Vec::new();
if flags.contains(ServerFlags::HTTP1) {
protos.extend(b"\x08http/1.1");
}
}
impl<Io: IoStream> AcceptorService<Io> for OpensslAcceptor {
type Accepted = SslStream<Io>;
type Future = AcceptorFut<Io>;
fn scheme(&self) -> &'static str {
"https"
if flags.contains(ServerFlags::HTTP2) {
protos.extend(b"\x02h2");
builder.set_alpn_select_callback(|_, protos| {
const H2: &[u8] = b"\x02h2";
if protos.windows(3).any(|window| window == H2) {
Ok(b"h2")
} else {
Err(AlpnError::NOACK)
}
});
}
fn accept(&self, io: Io) -> Self::Future {
AcceptorFut(SslAcceptorExt::accept_async(&self.acceptor, io))
if !protos.is_empty() {
builder.set_alpn_protos(&protos)?;
}
Ok(builder.build())
}
impl<T: IoStream> IoStream for SslStream<T> {
@@ -84,6 +67,11 @@ impl<T: IoStream> IoStream for SslStream<T> {
Ok(())
}
#[inline]
fn peer_addr(&self) -> Option<SocketAddr> {
self.get_ref().get_ref().peer_addr()
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().get_mut().set_nodelay(nodelay)
@@ -93,4 +81,19 @@ impl<T: IoStream> IoStream for SslStream<T> {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur)
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_keepalive(dur)
}
fn extensions(&self) -> Option<Rc<Extensions>> {
if let Some(x509) = self.get_ref().ssl().peer_certificate() {
let mut extensions = Extensions::new();
extensions.insert(x509);
Some(Rc::new(extensions))
} else {
None
}
}
}
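The `extensions()` hook above is what exposes the peer certificate added by the x509 stream-extension change in the commit list. A hedged helper sketch, assuming the `ssl` feature and that `Extensions::get` can be used here just as it is for request extensions:
use actix_web::server::IoStream;
use openssl::x509::X509;
// Illustrative only: report whether the TLS handshake stored a peer
// certificate in the stream extensions (as done by `extensions()` above).
fn has_client_cert<Io: IoStream>(io: &Io) -> bool {
    io.extensions()
        .map(|ext| ext.get::<X509>().is_some())
        .unwrap_or(false)
}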

View File

@@ -1,29 +1,25 @@
use std::net::Shutdown;
use std::sync::Arc;
use std::net::{Shutdown, SocketAddr};
use std::{io, time};
use actix_net::ssl; //::RustlsAcceptor;
use rustls::{ClientSession, ServerConfig, ServerSession};
use tokio_io::AsyncWrite;
use tokio_rustls::{AcceptAsync, ServerConfigExt, TlsStream};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_rustls::TlsStream;
use server::{AcceptorService, IoStream, ServerFlags};
use server::{IoStream, ServerFlags};
#[derive(Clone)]
/// Support `SSL` connections via rustls package
///
/// `rust-tls` feature enables `RustlsAcceptor` type
pub struct RustlsAcceptor {
config: Arc<ServerConfig>,
pub struct RustlsAcceptor<T> {
_t: ssl::RustlsAcceptor<T>,
}
impl RustlsAcceptor {
/// Create `OpensslAcceptor` with enabled `HTTP/2` and `HTTP1.1` support.
pub fn new(config: ServerConfig) -> Self {
RustlsAcceptor::with_flags(config, ServerFlags::HTTP1 | ServerFlags::HTTP2)
}
/// Create `OpensslAcceptor` with custom server flags.
pub fn with_flags(mut config: ServerConfig, flags: ServerFlags) -> Self {
impl<T: AsyncRead + AsyncWrite> RustlsAcceptor<T> {
/// Create `RustlsAcceptor` with custom server flags.
pub fn with_flags(
mut config: ServerConfig, flags: ServerFlags,
) -> ssl::RustlsAcceptor<T> {
let mut protos = Vec::new();
if flags.contains(ServerFlags::HTTP2) {
protos.push("h2".to_string());
@@ -35,22 +31,7 @@ impl RustlsAcceptor {
config.set_protocols(&protos);
}
RustlsAcceptor {
config: Arc::new(config),
}
}
}
impl<Io: IoStream> AcceptorService<Io> for RustlsAcceptor {
type Accepted = TlsStream<Io, ServerSession>;
type Future = AcceptAsync<Io>;
fn scheme(&self) -> &'static str {
"https"
}
fn accept(&self, io: Io) -> Self::Future {
ServerConfigExt::accept_async(&self.config, io)
ssl::RustlsAcceptor::new(config)
}
}
@@ -70,6 +51,11 @@ impl<Io: IoStream> IoStream for TlsStream<Io, ClientSession> {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_linger(dur)
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_keepalive(dur)
}
}
impl<Io: IoStream> IoStream for TlsStream<Io, ServerSession> {
@@ -79,6 +65,11 @@ impl<Io: IoStream> IoStream for TlsStream<Io, ServerSession> {
Ok(())
}
#[inline]
fn peer_addr(&self) -> Option<SocketAddr> {
self.get_ref().0.peer_addr()
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().0.set_nodelay(nodelay)
@@ -88,4 +79,9 @@ impl<Io: IoStream> IoStream for TlsStream<Io, ServerSession> {
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_linger(dur)
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_keepalive(dur)
}
}

View File

@@ -1,139 +0,0 @@
use std::{net, time};
use futures::sync::mpsc::{SendError, UnboundedSender};
use futures::sync::oneshot;
use futures::Future;
use actix::msgs::StopArbiter;
use actix::{Actor, Arbiter, AsyncContext, Context, Handler, Message, Response};
use super::server::{Connections, ServiceHandler};
use super::Token;
#[derive(Message)]
pub(crate) struct Conn<T> {
pub io: T,
pub handler: Token,
pub token: Token,
pub peer: Option<net::SocketAddr>,
}
pub(crate) struct Socket {
pub lst: net::TcpListener,
pub addr: net::SocketAddr,
pub token: Token,
}
#[derive(Clone)]
pub(crate) struct WorkerClient {
pub idx: usize,
tx: UnboundedSender<Conn<net::TcpStream>>,
conns: Connections,
}
impl WorkerClient {
pub fn new(
idx: usize, tx: UnboundedSender<Conn<net::TcpStream>>, conns: Connections,
) -> Self {
WorkerClient { idx, tx, conns }
}
pub fn send(
&self, msg: Conn<net::TcpStream>,
) -> Result<(), SendError<Conn<net::TcpStream>>> {
self.tx.unbounded_send(msg)
}
pub fn available(&self) -> bool {
self.conns.available()
}
}
/// Stop worker message. Returns `true` on successful shutdown
/// and `false` if some connections still alive.
pub(crate) struct StopWorker {
pub graceful: Option<time::Duration>,
}
impl Message for StopWorker {
type Result = Result<bool, ()>;
}
/// Http worker
///
/// Worker accepts Socket objects via unbounded channel and start requests
/// processing.
pub(crate) struct Worker {
conns: Connections,
handlers: Vec<Box<ServiceHandler>>,
}
impl Actor for Worker {
type Context = Context<Self>;
}
impl Worker {
pub(crate) fn new(conns: Connections, handlers: Vec<Box<ServiceHandler>>) -> Self {
Worker { conns, handlers }
}
fn shutdown(&self, force: bool) {
self.handlers.iter().for_each(|h| h.shutdown(force));
}
fn shutdown_timeout(
&self, ctx: &mut Context<Worker>, tx: oneshot::Sender<bool>, dur: time::Duration,
) {
// sleep for 1 second and then check again
ctx.run_later(time::Duration::new(1, 0), move |slf, ctx| {
let num = slf.conns.num_connections();
if num == 0 {
let _ = tx.send(true);
Arbiter::current().do_send(StopArbiter(0));
} else if let Some(d) = dur.checked_sub(time::Duration::new(1, 0)) {
slf.shutdown_timeout(ctx, tx, d);
} else {
info!("Force shutdown http worker, {} connections", num);
slf.shutdown(true);
let _ = tx.send(false);
Arbiter::current().do_send(StopArbiter(0));
}
});
}
}
impl Handler<Conn<net::TcpStream>> for Worker {
type Result = ();
fn handle(&mut self, msg: Conn<net::TcpStream>, _: &mut Context<Self>) {
self.handlers[msg.handler.0].handle(msg.token, msg.io, msg.peer)
}
}
/// `StopWorker` message handler
impl Handler<StopWorker> for Worker {
type Result = Response<bool, ()>;
fn handle(&mut self, msg: StopWorker, ctx: &mut Context<Self>) -> Self::Result {
let num = self.conns.num_connections();
if num == 0 {
info!("Shutting down http worker, 0 connections");
Response::reply(Ok(true))
} else if let Some(dur) = msg.graceful {
self.shutdown(false);
let (tx, rx) = oneshot::channel();
let num = self.conns.num_connections();
if num != 0 {
info!("Graceful http worker shutdown, {} connections", num);
self.shutdown_timeout(ctx, tx, dur);
Response::reply(Ok(true))
} else {
Response::async(rx.map_err(|_| ()))
}
} else {
info!("Force shutdown http worker, {} connections", num);
self.shutdown(true);
Response::reply(Ok(false))
}
}
}

View File

@@ -4,8 +4,10 @@ use std::str::FromStr;
use std::sync::mpsc;
use std::{net, thread};
use actix_inner::{Actor, Addr, System};
use actix::{Actor, Addr, System};
use actix::actors::signal;
use actix_net::server::Server;
use cookie::Cookie;
use futures::Future;
use http::header::HeaderName;
@@ -13,14 +15,10 @@ use http::{HeaderMap, HttpTryFrom, Method, Uri, Version};
use net2::TcpBuilder;
use tokio::runtime::current_thread::Runtime;
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
use openssl::ssl::SslAcceptorBuilder;
#[cfg(feature = "rust-tls")]
use rustls::ServerConfig;
#[cfg(feature = "alpn")]
use server::OpensslAcceptor;
#[cfg(feature = "rust-tls")]
use server::RustlsAcceptor;
use application::{App, HttpApplication};
use body::Binary;
@@ -70,6 +68,7 @@ pub struct TestServer {
ssl: bool,
conn: Addr<ClientConnector>,
rt: Runtime,
backend: Addr<Server>,
}
impl TestServer {
@@ -79,13 +78,13 @@ impl TestServer {
/// middlewares or set handlers for test application.
pub fn new<F>(config: F) -> Self
where
F: Sync + Send + 'static + Fn(&mut TestApp<()>),
F: Clone + Send + 'static + Fn(&mut TestApp<()>),
{
TestServerBuilder::new(|| ()).start(config)
}
/// Create test server builder
pub fn build() -> TestServerBuilder<()> {
pub fn build() -> TestServerBuilder<(), impl Fn() -> () + Clone + Send + 'static> {
TestServerBuilder::new(|| ())
}
@@ -94,19 +93,18 @@ impl TestServer {
/// This method can be used for constructing application state.
/// Also it can be used for external dependency initialization,
/// like creating sync actors for diesel integration.
pub fn build_with_state<F, S>(state: F) -> TestServerBuilder<S>
pub fn build_with_state<S, F>(state: F) -> TestServerBuilder<S, F>
where
F: Fn() -> S + Sync + Send + 'static,
F: Fn() -> S + Clone + Send + 'static,
S: 'static,
{
TestServerBuilder::new(state)
}
/// Start new test server with application factory
pub fn with_factory<F, U, H>(factory: F) -> Self
pub fn with_factory<F, H>(factory: F) -> Self
where
F: Fn() -> U + Sync + Send + 'static,
U: IntoIterator<Item = H> + 'static,
F: Fn() -> H + Send + Clone + 'static,
H: IntoHttpHandler + 'static,
{
let (tx, rx) = mpsc::channel();
@@ -117,28 +115,30 @@ impl TestServer {
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
let local_addr = tcp.local_addr().unwrap();
HttpServer::new(factory)
let srv = HttpServer::new(factory)
.disable_signals()
.listen(tcp)
.keep_alive(5)
.start();
tx.send((System::current(), local_addr, TestServer::get_conn()))
tx.send((System::current(), local_addr, TestServer::get_conn(), srv))
.unwrap();
sys.run();
});
let (system, addr, conn) = rx.recv().unwrap();
let (system, addr, conn, backend) = rx.recv().unwrap();
System::set_current(system);
TestServer {
addr,
conn,
ssl: false,
rt: Runtime::new().unwrap(),
backend,
}
}
fn get_conn() -> Addr<ClientConnector> {
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
{
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
@@ -146,7 +146,10 @@ impl TestServer {
builder.set_verify(SslVerifyMode::NONE);
ClientConnector::with_connector(builder.build()).start()
}
#[cfg(all(feature = "rust-tls", not(feature = "alpn")))]
#[cfg(all(
feature = "rust-tls",
not(any(feature = "alpn", feature = "ssl"))
))]
{
use rustls::ClientConfig;
use std::fs::File;
@@ -156,7 +159,7 @@ impl TestServer {
config.root_store.add_pem_file(pem_file).unwrap();
ClientConnector::with_connector(config).start()
}
#[cfg(not(any(feature = "alpn", feature = "rust-tls")))]
#[cfg(not(any(feature = "alpn", feature = "ssl", feature = "rust-tls")))]
{
ClientConnector::default().start()
}
@@ -198,6 +201,7 @@ impl TestServer {
/// Stop http server
fn stop(&mut self) {
let _ = self.backend.send(signal::Signal(signal::SignalType::Term)).wait();
System::current().stop();
}
@@ -235,6 +239,11 @@ impl TestServer {
ClientRequest::post(self.url("/").as_str())
}
/// Create `PATCH` request
pub fn patch(&self) -> ClientRequestBuilder {
ClientRequest::patch(self.url("/").as_str())
}
/// Create `HEAD` request
pub fn head(&self) -> ClientRequestBuilder {
ClientRequest::head(self.url("/").as_str())
@@ -260,30 +269,33 @@ impl Drop for TestServer {
///
/// This type can be used to construct an instance of `TestServer` through a
/// builder-like pattern.
pub struct TestServerBuilder<S> {
state: Box<Fn() -> S + Sync + Send + 'static>,
#[cfg(feature = "alpn")]
pub struct TestServerBuilder<S, F>
where
F: Fn() -> S + Send + Clone + 'static,
{
state: F,
#[cfg(any(feature = "alpn", feature = "ssl"))]
ssl: Option<SslAcceptorBuilder>,
#[cfg(feature = "rust-tls")]
rust_ssl: Option<ServerConfig>,
}
impl<S: 'static> TestServerBuilder<S> {
impl<S: 'static, F> TestServerBuilder<S, F>
where
F: Fn() -> S + Send + Clone + 'static,
{
/// Create a new test server
pub fn new<F>(state: F) -> TestServerBuilder<S>
where
F: Fn() -> S + Sync + Send + 'static,
{
pub fn new(state: F) -> TestServerBuilder<S, F> {
TestServerBuilder {
state: Box::new(state),
#[cfg(feature = "alpn")]
state,
#[cfg(any(feature = "alpn", feature = "ssl"))]
ssl: None,
#[cfg(feature = "rust-tls")]
rust_ssl: None,
}
}
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
/// Create ssl server
pub fn ssl(mut self, ssl: SslAcceptorBuilder) -> Self {
self.ssl = Some(ssl);
@@ -299,15 +311,15 @@ impl<S: 'static> TestServerBuilder<S> {
#[allow(unused_mut)]
/// Configure test application and run test server
pub fn start<F>(mut self, config: F) -> TestServer
pub fn start<C>(mut self, config: C) -> TestServer
where
F: Sync + Send + 'static + Fn(&mut TestApp<S>),
C: Fn(&mut TestApp<S>) + Clone + Send + 'static,
{
let (tx, rx) = mpsc::channel();
let mut has_ssl = false;
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
{
has_ssl = has_ssl || self.ssl.is_some();
}
@@ -326,19 +338,19 @@ impl<S: 'static> TestServerBuilder<S> {
let mut srv = HttpServer::new(move || {
let mut app = TestApp::new(state());
config(&mut app);
vec![app]
app
}).workers(1)
.keep_alive(5)
.disable_signals();
tx.send((System::current(), addr, TestServer::get_conn()))
.unwrap();
#[cfg(feature = "alpn")]
#[cfg(any(feature = "alpn", feature = "ssl"))]
{
let ssl = self.ssl.take();
if let Some(ssl) = ssl {
let tcp = net::TcpListener::bind(addr).unwrap();
srv = srv.listen_with(tcp, OpensslAcceptor::new(ssl).unwrap());
srv = srv.listen_ssl(tcp, ssl).unwrap();
}
}
#[cfg(feature = "rust-tls")]
@@ -346,25 +358,29 @@ impl<S: 'static> TestServerBuilder<S> {
let ssl = self.rust_ssl.take();
if let Some(ssl) = ssl {
let tcp = net::TcpListener::bind(addr).unwrap();
srv = srv.listen_with(tcp, RustlsAcceptor::new(ssl));
srv = srv.listen_rustls(tcp, ssl);
}
}
if !has_ssl {
let tcp = net::TcpListener::bind(addr).unwrap();
srv = srv.listen(tcp);
}
srv.start();
let backend = srv.start();
tx.send((System::current(), addr, TestServer::get_conn(), backend))
.unwrap();
sys.run();
});
let (system, addr, conn) = rx.recv().unwrap();
let (system, addr, conn, backend) = rx.recv().unwrap();
System::set_current(system);
TestServer {
addr,
conn,
ssl: has_ssl,
rt: Runtime::new().unwrap(),
backend,
}
}
}
@@ -504,6 +520,11 @@ impl TestRequest<()> {
{
TestRequest::default().header(key, value)
}
/// Create TestRequest and set request cookie
pub fn with_cookie(cookie: Cookie<'static>) -> TestRequest<()> {
TestRequest::default().cookie(cookie)
}
}
impl<S: 'static> TestRequest<S> {
@@ -540,6 +561,25 @@ impl<S: 'static> TestRequest<S> {
self
}
/// set cookie of this request
pub fn cookie(mut self, cookie: Cookie<'static>) -> Self {
if self.cookies.is_some() {
let mut should_insert = true;
let old_cookies = self.cookies.as_mut().unwrap();
for old_cookie in old_cookies.iter() {
if old_cookie == &cookie {
should_insert = false
};
};
if should_insert {
old_cookies.push(cookie);
};
} else {
self.cookies = Some(vec![cookie]);
};
self
}
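Together with the `with_cookie` constructor above, this lets tests prepare a request that already carries cookies. A small sketch, assuming `Cookie` is reachable at `actix_web::http::Cookie` as in 0.7 (names and values are placeholders):
extern crate actix_web;
use actix_web::http::Cookie;
use actix_web::test::TestRequest;
fn main() {
    let req = TestRequest::with_cookie(Cookie::new("session", "abc"))
        .cookie(Cookie::new("session", "abc")) // identical cookie: not inserted twice
        .finish();
    assert!(req.cookie("session").is_some());
}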
/// Set a header
pub fn set<H: Header>(mut self, hdr: H) -> Self {
if let Ok(value) = hdr.try_into() {

View File

@@ -1,25 +1,12 @@
use http::Uri;
use std::rc::Rc;
#[allow(dead_code)]
const GEN_DELIMS: &[u8] = b":/?#[]@";
#[allow(dead_code)]
const SUB_DELIMS_WITHOUT_QS: &[u8] = b"!$'()*,";
#[allow(dead_code)]
const SUB_DELIMS: &[u8] = b"!$'()*,+?=;";
#[allow(dead_code)]
const RESERVED: &[u8] = b":/?#[]@!$'()*,+?=;";
#[allow(dead_code)]
const UNRESERVED: &[u8] = b"abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
1234567890
-._~";
const ALLOWED: &[u8] = b"abcdefghijklmnopqrstuvwxyz
ABCDEFGHIJKLMNOPQRSTUVWXYZ
1234567890
-._~
!$'()*,";
const QS: &[u8] = b"+&=;b";
// https://tools.ietf.org/html/rfc3986#section-2.2
const RESERVED_PLUS_EXTRA: &[u8] = b":/?#[]@!$&'()*,+?;=%^ <>\"\\`{}|";
// https://tools.ietf.org/html/rfc3986#section-2.3
const UNRESERVED: &[u8] =
b"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890-._~";
#[inline]
fn bit_at(array: &[u8], ch: u8) -> bool {
@@ -32,7 +19,8 @@ fn set_bit(array: &mut [u8], ch: u8) {
}
lazy_static! {
static ref DEFAULT_QUOTER: Quoter = { Quoter::new(b"@:", b"/+") };
static ref UNRESERVED_QUOTER: Quoter = { Quoter::new(UNRESERVED) };
pub(crate) static ref RESERVED_QUOTER: Quoter = { Quoter::new(RESERVED_PLUS_EXTRA) };
}
#[derive(Default, Clone, Debug)]
@@ -43,7 +31,7 @@ pub(crate) struct Url {
impl Url {
pub fn new(uri: Uri) -> Url {
let path = DEFAULT_QUOTER.requote(uri.path().as_bytes());
let path = UNRESERVED_QUOTER.requote(uri.path().as_bytes());
Url { uri, path }
}
@@ -63,36 +51,19 @@ impl Url {
pub(crate) struct Quoter {
safe_table: [u8; 16],
protected_table: [u8; 16],
}
impl Quoter {
pub fn new(safe: &[u8], protected: &[u8]) -> Quoter {
pub fn new(safe: &[u8]) -> Quoter {
let mut q = Quoter {
safe_table: [0; 16],
protected_table: [0; 16],
};
// prepare safe table
for i in 0..128 {
if ALLOWED.contains(&i) {
set_bit(&mut q.safe_table, i);
}
if QS.contains(&i) {
set_bit(&mut q.safe_table, i);
}
}
for ch in safe {
set_bit(&mut q.safe_table, *ch)
}
// prepare protected table
for ch in protected {
set_bit(&mut q.safe_table, *ch);
set_bit(&mut q.protected_table, *ch);
}
q
}
@@ -115,19 +86,17 @@ impl Quoter {
if let Some(ch) = restore_ch(pct[1], pct[2]) {
if ch < 128 {
if bit_at(&self.protected_table, ch) {
buf.extend_from_slice(&pct);
idx += 1;
continue;
}
if bit_at(&self.safe_table, ch) {
buf.push(ch);
idx += 1;
continue;
}
buf.extend_from_slice(&pct);
} else {
// Not ASCII, decode it
buf.push(ch);
}
buf.push(ch);
} else {
buf.extend_from_slice(&pct[..]);
}
@@ -148,7 +117,7 @@ impl Quoter {
if let Some(data) = cloned {
// Unsafe: we get data from http::Uri, which does utf-8 checks already
// this code only decodes valid pct encoded values
Some(unsafe { Rc::new(String::from_utf8_unchecked(data)) })
Some(Rc::new(unsafe { String::from_utf8_unchecked(data) }))
} else {
None
}
@@ -172,3 +141,37 @@ fn from_hex(v: u8) -> Option<u8> {
fn restore_ch(d1: u8, d2: u8) -> Option<u8> {
from_hex(d1).and_then(|d1| from_hex(d2).and_then(move |d2| Some(d1 << 4 | d2)))
}
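A small illustrative test for the two helpers above (it assumes `from_hex` accepts both upper- and lower-case hex digits, as in the upstream helper):
#[cfg(test)]
mod pct_decode_sketch {
    use super::{from_hex, restore_ch};
    #[test]
    fn restores_percent_encoded_byte() {
        // "%2F" restores to '/' (0x2F); a non-hex digit yields None.
        assert_eq!(restore_ch(b'2', b'F'), Some(b'/'));
        assert_eq!(from_hex(b'g'), None);
    }
}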
#[cfg(test)]
mod tests {
use std::rc::Rc;
use super::*;
#[test]
fn decode_path() {
assert_eq!(UNRESERVED_QUOTER.requote(b"https://localhost:80/foo"), None);
assert_eq!(
Rc::try_unwrap(UNRESERVED_QUOTER.requote(
b"https://localhost:80/foo%25"
).unwrap()).unwrap(),
"https://localhost:80/foo%25".to_string()
);
assert_eq!(
Rc::try_unwrap(UNRESERVED_QUOTER.requote(
b"http://cache-service/http%3A%2F%2Flocalhost%3A80%2Ffoo"
).unwrap()).unwrap(),
"http://cache-service/http%3A%2F%2Flocalhost%3A80%2Ffoo".to_string()
);
assert_eq!(
Rc::try_unwrap(UNRESERVED_QUOTER.requote(
b"http://cache/http%3A%2F%2Flocal%3A80%2Ffile%2F%252Fvar%252Flog%0A"
).unwrap()).unwrap(),
"http://cache/http%3A%2F%2Flocal%3A80%2Ffile%2F%252Fvar%252Flog%0A".to_string()
);
}
}

View File

@@ -12,7 +12,6 @@ trait FnWith<T, R>: 'static {
}
impl<T, R, F: Fn(T) -> R + 'static> FnWith<T, R> for F {
#[cfg_attr(feature = "cargo-clippy", allow(boxed_local))]
fn call_with(self: &Self, arg: T) -> R {
(*self)(arg)
}
@@ -42,24 +41,6 @@ where
fn create_with_config(self, T::Config) -> WithAsync<T, S, R, I, E>;
}
// impl<T1, T2, T3, S, F, R> WithFactory<(T1, T2, T3), S, R> for F
// where F: Fn(T1, T2, T3) -> R + 'static,
// T1: FromRequest<S> + 'static,
// T2: FromRequest<S> + 'static,
// T3: FromRequest<S> + 'static,
// R: Responder + 'static,
// S: 'static,
// {
// fn create(self) -> With<(T1, T2, T3), S, R> {
// With::new(move |(t1, t2, t3)| (self)(t1, t2, t3), (
// T1::Config::default(), T2::Config::default(), T3::Config::default()))
// }
// fn create_with_config(self, cfg: (T1::Config, T2::Config, T3::Config,)) -> With<(T1, T2, T3), S, R> {
// With::new(move |(t1, t2, t3)| (self)(t1, t2, t3), cfg)
// }
// }
#[doc(hidden)]
pub struct With<T, S, R>
where
@@ -105,7 +86,7 @@ where
match fut.poll() {
Ok(Async::Ready(resp)) => AsyncResult::ok(resp),
Ok(Async::NotReady) => AsyncResult::async(Box::new(fut)),
Ok(Async::NotReady) => AsyncResult::future(Box::new(fut)),
Err(e) => AsyncResult::err(e),
}
}
@@ -227,7 +208,7 @@ where
match fut.poll() {
Ok(Async::Ready(resp)) => AsyncResult::ok(resp),
Ok(Async::NotReady) => AsyncResult::async(Box::new(fut)),
Ok(Async::NotReady) => AsyncResult::future(Box::new(fut)),
Err(e) => AsyncResult::err(e),
}
}

View File

@@ -231,6 +231,13 @@ where
pub fn handle(&self) -> SpawnHandle {
self.inner.curr_handle()
}
/// Set mailbox capacity
///
/// By default mailbox capacity is 16 messages.
pub fn set_mailbox_capacity(&mut self, cap: usize) {
self.inner.set_mailbox_capacity(cap)
}
}
impl<A, S> WsWriter for WebsocketContext<A, S>

View File

@@ -50,7 +50,10 @@ pub(crate) fn apply_mask(buf: &mut [u8], mask_u32: u32) {
// TODO: copy_nonoverlapping here compiles to call memcpy. While it is not so
// inefficient, it could be done better. The compiler does not understand that
// a `ShortSlice` must be smaller than a u64.
#[cfg_attr(feature = "cargo-clippy", allow(needless_pass_by_value))]
#[cfg_attr(
feature = "cargo-clippy",
allow(needless_pass_by_value)
)]
fn xor_short(buf: ShortSlice, mask: u64) {
// Unsafe: we know that a `ShortSlice` fits in a u64
unsafe {

View File

@@ -8,7 +8,8 @@
//!
//! ```rust
//! # extern crate actix_web;
//! # use actix_web::actix::*;
//! # extern crate actix;
//! # use actix::prelude::*;
//! # use actix_web::*;
//! use actix_web::{ws, HttpRequest, HttpResponse};
//!

tests/identity.pfx Normal file

Binary file not shown.

tests/test space.binary Normal file
View File

@@ -0,0 +1 @@

View File

@@ -179,7 +179,7 @@ fn test_client_gzip_encoding_large() {
#[test]
fn test_client_gzip_encoding_large_random() {
let data = rand::thread_rng()
.gen_ascii_chars()
.sample_iter(&rand::distributions::Alphanumeric)
.take(100_000)
.collect::<String>();
@@ -247,7 +247,7 @@ fn test_client_brotli_encoding() {
#[test]
fn test_client_brotli_encoding_large_random() {
let data = rand::thread_rng()
.gen_ascii_chars()
.sample_iter(&rand::distributions::Alphanumeric)
.take(70_000)
.collect::<String>();
@@ -309,7 +309,7 @@ fn test_client_deflate_encoding() {
#[test]
fn test_client_deflate_encoding_large_random() {
let data = rand::thread_rng()
.gen_ascii_chars()
.sample_iter(&rand::distributions::Alphanumeric)
.take(70_000)
.collect::<String>();
@@ -407,24 +407,29 @@ fn test_client_cookie_handling() {
let cookie2 = cookie2b.clone();
app.handler(move |req: &HttpRequest| {
// Check cookies were sent correctly
req.cookie("cookie1").ok_or_else(err)
.and_then(|c1| if c1.value() == "value1" {
req.cookie("cookie1")
.ok_or_else(err)
.and_then(|c1| {
if c1.value() == "value1" {
Ok(())
} else {
Err(err())
})
.and_then(|()| req.cookie("cookie2").ok_or_else(err))
.and_then(|c2| if c2.value() == "value2" {
}
}).and_then(|()| req.cookie("cookie2").ok_or_else(err))
.and_then(|c2| {
if c2.value() == "value2" {
Ok(())
} else {
Err(err())
})
// Send some cookies back
.map(|_| HttpResponse::Ok()
.cookie(cookie1.clone())
.cookie(cookie2.clone())
.finish()
)
}
})
// Send some cookies back
.map(|_| {
HttpResponse::Ok()
.cookie(cookie1.clone())
.cookie(cookie2.clone())
.finish()
})
})
});
@@ -501,3 +506,31 @@ fn client_read_until_eof() {
let bytes = sys.block_on(response.body()).unwrap();
assert_eq!(bytes, Bytes::from_static(b"welcome!"));
}
#[test]
fn client_basic_auth() {
let mut srv =
test::TestServer::new(|app| app.handler(|_| HttpResponse::Ok().body(STR)));
// set authorization header to Basic <base64-encoded username:password>
let request = srv
.get()
.basic_auth("username", Some("password"))
.finish()
.unwrap();
let repr = format!("{:?}", request);
assert!(repr.contains("Basic dXNlcm5hbWU6cGFzc3dvcmQ="));
}
#[test]
fn client_bearer_auth() {
let mut srv =
test::TestServer::new(|app| app.handler(|_| HttpResponse::Ok().body(STR)));
// set authorization header to Bearer <token>
let request = srv
.get()
.bearer_auth("someS3cr3tAutht0k3n")
.finish()
.unwrap();
let repr = format!("{:?}", request);
assert!(repr.contains("Bearer someS3cr3tAutht0k3n"));
}

View File

@@ -0,0 +1,81 @@
extern crate actix;
extern crate actix_net;
extern crate actix_web;
use std::{thread, time};
use actix::System;
use actix_net::server::Server;
use actix_net::service::NewServiceExt;
use actix_web::server::{HttpService, KeepAlive, ServiceConfig, StreamConfiguration};
use actix_web::{client, http, test, App, HttpRequest};
#[test]
fn test_custom_pipeline() {
let addr = test::TestServer::unused_addr();
thread::spawn(move || {
Server::new()
.bind("test", addr, move || {
let app = App::new()
.route("/", http::Method::GET, |_: HttpRequest| "OK")
.finish();
let settings = ServiceConfig::build(app)
.keep_alive(KeepAlive::Disabled)
.client_timeout(1000)
.client_shutdown(1000)
.server_hostname("localhost")
.server_address(addr)
.finish();
StreamConfiguration::new()
.nodelay(true)
.tcp_keepalive(Some(time::Duration::from_secs(10)))
.and_then(HttpService::new(settings))
}).unwrap()
.run();
});
let mut sys = System::new("test");
{
let req = client::ClientRequest::get(format!("http://{}/", addr).as_str())
.finish()
.unwrap();
let response = sys.block_on(req.send()).unwrap();
assert!(response.status().is_success());
}
}
#[test]
fn test_h1() {
use actix_web::server::H1Service;
let addr = test::TestServer::unused_addr();
thread::spawn(move || {
Server::new()
.bind("test", addr, move || {
let app = App::new()
.route("/", http::Method::GET, |_: HttpRequest| "OK")
.finish();
let settings = ServiceConfig::build(app)
.keep_alive(KeepAlive::Disabled)
.client_timeout(1000)
.client_shutdown(1000)
.server_hostname("localhost")
.server_address(addr)
.finish();
H1Service::new(settings)
}).unwrap()
.run();
});
let mut sys = System::new("test");
{
let req = client::ClientRequest::get(format!("http://{}/", addr).as_str())
.finish()
.unwrap();
let response = sys.block_on(req.send()).unwrap();
assert!(response.status().is_success());
}
}

View File

@@ -672,6 +672,6 @@ fn test_unsafe_path_route() {
let bytes = srv.execute(response.body()).unwrap();
assert_eq!(
bytes,
Bytes::from_static(b"success: http:%2F%2Fexample.com")
Bytes::from_static(b"success: http%3A%2F%2Fexample.com")
);
}

View File

@@ -1,4 +1,5 @@
extern crate actix;
extern crate actix_net;
extern crate actix_web;
#[cfg(feature = "brotli")]
extern crate brotli2;
@@ -9,9 +10,18 @@ extern crate h2;
extern crate http as modhttp;
extern crate rand;
extern crate tokio;
extern crate tokio_current_thread;
extern crate tokio_current_thread as current_thread;
extern crate tokio_reactor;
extern crate tokio_tcp;
#[cfg(feature = "tls")]
extern crate native_tls;
#[cfg(feature = "ssl")]
extern crate openssl;
#[cfg(feature = "rust-tls")]
extern crate rustls;
use std::io::{Read, Write};
use std::sync::Arc;
use std::{thread, time};
@@ -28,8 +38,8 @@ use h2::client as h2client;
use modhttp::Request;
use rand::distributions::Alphanumeric;
use rand::Rng;
use tokio::executor::current_thread;
use tokio::runtime::current_thread::Runtime;
use tokio_current_thread::spawn;
use tokio_tcp::TcpStream;
use actix_web::*;
@@ -883,6 +893,209 @@ fn test_brotli_encoding_large() {
assert_eq!(bytes, Bytes::from(data));
}
#[cfg(all(feature = "brotli", feature = "ssl"))]
#[test]
fn test_brotli_encoding_large_ssl() {
use actix::{Actor, System};
use openssl::ssl::{
SslAcceptor, SslConnector, SslFiletype, SslMethod, SslVerifyMode,
};
// load ssl keys
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder
.set_private_key_file("tests/key.pem", SslFiletype::PEM)
.unwrap();
builder
.set_certificate_chain_file("tests/cert.pem")
.unwrap();
let data = STR.repeat(10);
let srv = test::TestServer::build().ssl(builder).start(|app| {
app.handler(|req: &HttpRequest| {
req.body()
.and_then(|bytes: Bytes| {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Identity)
.body(bytes))
}).responder()
})
});
let mut rt = System::new("test");
// client connector
let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
builder.set_verify(SslVerifyMode::NONE);
let conn = client::ClientConnector::with_connector(builder.build()).start();
// body
let mut e = BrotliEncoder::new(Vec::new(), 5);
e.write_all(data.as_ref()).unwrap();
let enc = e.finish().unwrap();
// client request
let request = client::ClientRequest::build()
.uri(srv.url("/"))
.method(http::Method::POST)
.header(http::header::CONTENT_ENCODING, "br")
.with_connector(conn)
.body(enc)
.unwrap();
let response = rt.block_on(request.send()).unwrap();
assert!(response.status().is_success());
// read response
let bytes = rt.block_on(response.body()).unwrap();
assert_eq!(bytes, Bytes::from(data));
}
#[cfg(all(feature = "rust-tls", feature = "ssl"))]
#[test]
fn test_reading_deflate_encoding_large_random_ssl() {
use actix::{Actor, System};
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
use rustls::internal::pemfile::{certs, rsa_private_keys};
use rustls::{NoClientAuth, ServerConfig};
use std::fs::File;
use std::io::BufReader;
// load ssl keys
let mut config = ServerConfig::new(NoClientAuth::new());
let cert_file = &mut BufReader::new(File::open("tests/cert.pem").unwrap());
let key_file = &mut BufReader::new(File::open("tests/key.pem").unwrap());
let cert_chain = certs(cert_file).unwrap();
let mut keys = rsa_private_keys(key_file).unwrap();
config.set_single_cert(cert_chain, keys.remove(0)).unwrap();
let data = rand::thread_rng()
.sample_iter(&Alphanumeric)
.take(160_000)
.collect::<String>();
let srv = test::TestServer::build().rustls(config).start(|app| {
app.handler(|req: &HttpRequest| {
req.body()
.and_then(|bytes: Bytes| {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Identity)
.body(bytes))
}).responder()
})
});
let mut rt = System::new("test");
// client connector
let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
builder.set_verify(SslVerifyMode::NONE);
let conn = client::ClientConnector::with_connector(builder.build()).start();
// encode data
let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
e.write_all(data.as_ref()).unwrap();
let enc = e.finish().unwrap();
// client request
let request = client::ClientRequest::build()
.uri(srv.url("/"))
.method(http::Method::POST)
.header(http::header::CONTENT_ENCODING, "deflate")
.with_connector(conn)
.body(enc)
.unwrap();
let response = rt.block_on(request.send()).unwrap();
assert!(response.status().is_success());
// read response
let bytes = rt.block_on(response.body()).unwrap();
assert_eq!(bytes.len(), data.len());
assert_eq!(bytes, Bytes::from(data));
}
#[cfg(all(feature = "tls", feature = "ssl"))]
#[test]
fn test_reading_deflate_encoding_large_random_tls() {
use native_tls::{Identity, TlsAcceptor};
use openssl::ssl::{
SslAcceptor, SslConnector, SslFiletype, SslMethod, SslVerifyMode,
};
use std::fs::File;
use std::sync::mpsc;
use actix::{Actor, System};
let (tx, rx) = mpsc::channel();
// load ssl keys
let mut file = File::open("tests/identity.pfx").unwrap();
let mut identity = vec![];
file.read_to_end(&mut identity).unwrap();
let identity = Identity::from_pkcs12(&identity, "1").unwrap();
let acceptor = TlsAcceptor::new(identity).unwrap();
// load ssl keys
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder
.set_private_key_file("tests/key.pem", SslFiletype::PEM)
.unwrap();
builder
.set_certificate_chain_file("tests/cert.pem")
.unwrap();
let data = rand::thread_rng()
.sample_iter(&Alphanumeric)
.take(160_000)
.collect::<String>();
let addr = test::TestServer::unused_addr();
thread::spawn(move || {
System::run(move || {
server::new(|| {
App::new().handler("/", |req: &HttpRequest| {
req.body()
.and_then(|bytes: Bytes| {
Ok(HttpResponse::Ok()
.content_encoding(http::ContentEncoding::Identity)
.body(bytes))
}).responder()
})
}).bind_tls(addr, acceptor)
.unwrap()
.start();
let _ = tx.send(System::current());
});
});
let sys = rx.recv().unwrap();
let mut rt = System::new("test");
// client connector
let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
builder.set_verify(SslVerifyMode::NONE);
let conn = client::ClientConnector::with_connector(builder.build()).start();
// encode data
let mut e = ZlibEncoder::new(Vec::new(), Compression::default());
e.write_all(data.as_ref()).unwrap();
let enc = e.finish().unwrap();
// client request
let request = client::ClientRequest::build()
.uri(format!("https://{}/", addr))
.method(http::Method::POST)
.header(http::header::CONTENT_ENCODING, "deflate")
.with_connector(conn)
.body(enc)
.unwrap();
let response = rt.block_on(request.send()).unwrap();
assert!(response.status().is_success());
// read response
let bytes = rt.block_on(response.body()).unwrap();
assert_eq!(bytes.len(), data.len());
assert_eq!(bytes, Bytes::from(data));
let _ = sys.stop();
}
#[test]
fn test_h2() {
let srv = test::TestServer::new(|app| app.handler(|_| HttpResponse::Ok().body(STR)));
@@ -904,7 +1117,7 @@ fn test_h2() {
let (response, _) = client.send_request(request, false).unwrap();
// Spawn a task to run the conn...
current_thread::spawn(h2.map_err(|e| println!("GOT ERR={:?}", e)));
spawn(h2.map_err(|e| println!("GOT ERR={:?}", e)));
response.and_then(|response| {
assert_eq!(response.status(), http::StatusCode::OK);
@@ -1008,3 +1221,188 @@ fn test_server_cookies() {
assert_eq!(cookies[1], first_cookie);
}
}
#[test]
fn test_slow_request() {
use actix::System;
use std::net;
use std::sync::mpsc;
let (tx, rx) = mpsc::channel();
let addr = test::TestServer::unused_addr();
thread::spawn(move || {
System::run(move || {
let srv = server::new(|| {
vec![App::new().resource("/", |r| {
r.method(http::Method::GET).f(|_| HttpResponse::Ok())
})]
});
let srv = srv.bind(addr).unwrap();
srv.client_timeout(200).start();
let _ = tx.send(System::current());
});
});
let sys = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(200));
let mut stream = net::TcpStream::connect(addr).unwrap();
let mut data = String::new();
let _ = stream.read_to_string(&mut data);
assert!(data.starts_with("HTTP/1.1 408 Request Timeout"));
let mut stream = net::TcpStream::connect(addr).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP/1.1\r\n");
let mut data = String::new();
let _ = stream.read_to_string(&mut data);
assert!(data.starts_with("HTTP/1.1 408 Request Timeout"));
sys.stop();
}
#[test]
fn test_malformed_request() {
use actix::System;
use std::net;
use std::sync::mpsc;
let (tx, rx) = mpsc::channel();
let addr = test::TestServer::unused_addr();
thread::spawn(move || {
System::run(move || {
let srv = server::new(|| {
App::new().resource("/", |r| {
r.method(http::Method::GET).f(|_| HttpResponse::Ok())
})
});
let _ = srv.bind(addr).unwrap().start();
let _ = tx.send(System::current());
});
});
let sys = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(200));
let mut stream = net::TcpStream::connect(addr).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP1.1\r\n");
let mut data = String::new();
let _ = stream.read_to_string(&mut data);
assert!(data.starts_with("HTTP/1.1 400 Bad Request"));
sys.stop();
}
#[test]
fn test_app_404() {
let mut srv = test::TestServer::with_factory(|| {
App::new().prefix("/prefix").resource("/", |r| {
r.method(http::Method::GET).f(|_| HttpResponse::Ok())
})
});
let request = srv.client(http::Method::GET, "/prefix/").finish().unwrap();
let response = srv.execute(request.send()).unwrap();
assert!(response.status().is_success());
let request = srv.client(http::Method::GET, "/").finish().unwrap();
let response = srv.execute(request.send()).unwrap();
assert_eq!(response.status(), http::StatusCode::NOT_FOUND);
}
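// Connects to the SSL listener but never starts a TLS handshake; once the 200ms
// client timeout expires the server should drop the connection without writing
// anything back.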
#[test]
#[cfg(feature = "ssl")]
fn test_ssl_handshake_timeout() {
use actix::System;
use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
use std::net;
use std::sync::mpsc;
let (tx, rx) = mpsc::channel();
let addr = test::TestServer::unused_addr();
// load ssl keys
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder
.set_private_key_file("tests/key.pem", SslFiletype::PEM)
.unwrap();
builder
.set_certificate_chain_file("tests/cert.pem")
.unwrap();
thread::spawn(move || {
System::run(move || {
let srv = server::new(|| {
App::new().resource("/", |r| {
r.method(http::Method::GET).f(|_| HttpResponse::Ok())
})
});
srv.bind_ssl(addr, builder)
.unwrap()
.workers(1)
.client_timeout(200)
.start();
let _ = tx.send(System::current());
});
});
let sys = rx.recv().unwrap();
let mut stream = net::TcpStream::connect(addr).unwrap();
let mut data = String::new();
let _ = stream.read_to_string(&mut data);
assert!(data.is_empty());
let _ = sys.stop();
}
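// 1xx informational and 204 No Content responses must not carry a Content-Length
// header; ordinary empty responses (200, 404) should get an explicit
// "Content-Length: 0".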
#[test]
fn test_content_length() {
use actix_web::http::header::{HeaderName, HeaderValue};
use http::StatusCode;
let mut srv = test::TestServer::new(move |app| {
app.resource("/{status}", |r| {
r.f(|req: &HttpRequest| {
let indx: usize =
req.match_info().get("status").unwrap().parse().unwrap();
let statuses = [
StatusCode::NO_CONTENT,
StatusCode::CONTINUE,
StatusCode::SWITCHING_PROTOCOLS,
StatusCode::PROCESSING,
StatusCode::OK,
StatusCode::NOT_FOUND,
];
HttpResponse::new(statuses[indx])
})
});
});
let addr = srv.addr();
let mut get_resp = |i| {
let url = format!("http://{}/{}", addr, i);
let req = srv.get().uri(url).finish().unwrap();
srv.execute(req.send()).unwrap()
};
let header = HeaderName::from_static("content-length");
let value = HeaderValue::from_static("0");
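// statuses[0..4]: 204, 100, 101, 102 -- no Content-Length expected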
for i in 0..4 {
let response = get_resp(i);
assert_eq!(response.headers().get(&header), None);
}
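// statuses[4..6]: 200, 404 -- "Content-Length: 0" expected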
for i in 4..6 {
let response = get_resp(i);
assert_eq!(response.headers().get(&header), Some(&value));
}
}
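// Exercises the PATCH request method via the test server's patch() builder.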
#[test]
fn test_patch_method() {
let mut srv = test::TestServer::new(|app| app.handler(|_| HttpResponse::Ok()));
let req = srv.patch().finish().unwrap();
let response = srv.execute(req.send()).unwrap();
assert!(response.status().is_success());
}


@@ -5,12 +5,16 @@ extern crate futures;
extern crate http;
extern crate rand;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::sync::Arc;
use std::{thread, time};
use bytes::Bytes;
use futures::Stream;
use rand::distributions::Alphanumeric;
use rand::Rng;
#[cfg(feature = "alpn")]
#[cfg(feature = "ssl")]
extern crate openssl;
#[cfg(feature = "rust-tls")]
extern crate rustls;
@@ -278,9 +282,8 @@ fn test_server_send_bin() {
}
#[test]
#[cfg(feature = "alpn")]
#[cfg(feature = "ssl")]
fn test_ws_server_ssl() {
-extern crate openssl;
use openssl::ssl::{SslAcceptor, SslFiletype, SslMethod};
// load ssl keys
@@ -316,7 +319,6 @@ fn test_ws_server_ssl() {
#[test]
#[cfg(feature = "rust-tls")]
fn test_ws_server_rust_tls() {
-extern crate rustls;
use rustls::internal::pemfile::{certs, rsa_private_keys};
use rustls::{NoClientAuth, ServerConfig};
use std::fs::File;
@@ -351,3 +353,43 @@ fn test_ws_server_rust_tls() {
assert_eq!(item, data);
}
}
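// Echo actor used by test_ws_stopped below; its stopped() hook bumps a shared
// counter so the test can observe that the websocket actor shut down.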
struct WsStopped(Arc<AtomicUsize>);
impl Actor for WsStopped {
type Context = ws::WebsocketContext<Self>;
fn stopped(&mut self, _: &mut Self::Context) {
self.0.fetch_add(1, Ordering::Relaxed);
}
}
impl StreamHandler<ws::Message, ws::ProtocolError> for WsStopped {
fn handle(&mut self, msg: ws::Message, ctx: &mut Self::Context) {
match msg {
ws::Message::Text(text) => ctx.text(text),
_ => (),
}
}
}
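// After the client closes the websocket, the actor's stopped() hook should have
// run exactly once (checked via the shared counter after a short delay).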
#[test]
fn test_ws_stopped() {
let num = Arc::new(AtomicUsize::new(0));
let num2 = num.clone();
let mut srv = test::TestServer::new(move |app| {
let num3 = num2.clone();
app.handler(move |req| ws::start(req, WsStopped(num3.clone())))
});
{
let (reader, mut writer) = srv.ws().unwrap();
writer.text("text");
writer.close(None);
let (item, _) = srv.execute(reader.into_future()).unwrap();
assert_eq!(item, Some(ws::Message::Text("text".to_owned())));
}
thread::sleep(time::Duration::from_millis(100));
assert_eq!(num.load(Ordering::Relaxed), 1);
}