mirror of https://github.com/fafhrd91/actix-web synced 2025-07-03 09:36:36 +02:00

Compare commits


1139 Commits

Author SHA1 Message Date
4c0ebd55d3 prepare actix-http-test release 2019-04-16 11:02:26 -07:00
e7ec77aa81 update readme 2019-04-16 10:50:37 -07:00
ddfd7523f7 prepare awc release 2019-04-16 10:49:38 -07:00
2986077a28 no need for feature 2019-04-16 10:32:48 -07:00
3744957804 actix_http::encoding always available 2019-04-16 10:27:58 -07:00
420d3064c5 Add .peer_addr() #744 2019-04-16 10:11:38 -07:00
eb4f6b74fb Merge branch 'master' of github.com:actix/actix-web 2019-04-16 09:58:07 -07:00
a116c4c2c7 Expose peer addr via Request::peer_addr() and RequestHead::peer_addr 2019-04-16 09:54:02 -07:00
7f674febb1 add 422 to httpcodes.rs (#782) 2019-04-15 16:55:06 -07:00
14252f5ef2 use test::call_service 2019-04-15 09:09:21 -07:00
7a28b32f6d Rename test::call_success to test::call_service 2019-04-15 07:44:07 -07:00
09cdf1e302 Rename RouterConfig to ServiceConfig 2019-04-15 07:32:49 -07:00
1eebd47072 fix warnings 2019-04-14 21:00:16 -07:00
002c41a7ca update trust-dns 2019-04-14 20:45:44 -07:00
ab4fda6084 update tests 2019-04-14 20:20:33 -07:00
f9078d41cd add test::read_response; fix TestRequest::app_data() 2019-04-14 19:52:12 -07:00
4cc2b38059 added read_response_json for testing (#776)
* added read_response_json for testing

* cleaned up

* modified docs for read_response_json

* typo in doc

* test code in doc should compile now

* use type coercion in doc

* removed generic R, replaced with Request
2019-04-14 16:25:45 -07:00
d7040dc303 alpha.6 release 2019-04-14 08:09:32 -07:00
6bc1a0c76b Do not set default headers for websocket request 2019-04-14 07:43:53 -07:00
5bd5651faa Allow to use any service as default service 2019-04-13 22:25:00 -07:00
32ac159ba2 update migration 2019-04-13 16:51:41 -07:00
ee33f52736 make extractor config type explicit 2019-04-13 16:35:25 -07:00
4f30fa9d46 Remove generic type for request payload, always use default 2019-04-13 14:50:54 -07:00
043f6e77ae remove nested multipart support 2019-04-13 10:11:07 -07:00
48518df883 do not generate all docs; use docs.rs for 1.0 docs 2019-04-13 09:35:23 -07:00
1f2b15397d prepare alpha5 release 2019-04-12 14:00:45 -07:00
87167f6581 update actix-connect 2019-04-12 12:33:11 -07:00
b4768a8f81 add TestRequest::run(), allows to run async functions 2019-04-12 11:28:57 -07:00
3fb7343e73 provide during test request construction 2019-04-12 11:22:18 -07:00
5cfba5ff16 add FramedRequest builder for testing 2019-04-12 11:15:58 -07:00
67c34a5937 Add Debug impl for BoxedSocket 2019-04-11 16:01:54 -07:00
94d7a7f873 custom future for SendError service 2019-04-11 15:12:23 -07:00
d86567fbdc revert SendResponse::Error type 2019-04-11 14:18:58 -07:00
d115b3b3ed ws verification takes RequestHead; add SendError utility service 2019-04-11 14:00:32 -07:00
6420a2fe1f update client example 2019-04-10 20:57:18 -07:00
0eed9e5257 add more migration 2019-04-10 20:51:57 -07:00
7801fcb993 update migration 2019-04-10 20:47:28 -07:00
e55be4dba6 add FramedRequest helper methods 2019-04-10 19:57:34 -07:00
12e1dad42e export TestBuffer 2019-04-10 19:43:09 -07:00
7cd59c38d3 rename framed App 2019-04-10 18:08:28 -07:00
8dc4a88aa6 add actix-framed 2019-04-10 15:06:27 -07:00
52aebb3bca fmt 2019-04-10 15:05:03 -07:00
6b42b2aaee remove framed for now 2019-04-10 12:55:56 -07:00
6ab9838977 added some error logging for extractors: Data, Json, Query, and Path (#765)
* added some error logging for extractors

* changed log::error to log::debug and fixed position of log for path

* added request path to debug logs
2019-04-10 12:45:13 -07:00
9d82d4dfb9 Fix body propagation in Response::from_error. #760 2019-04-10 12:43:31 -07:00
9bb40c249f add h1::SendResponse future; renamed to MessageBody::size 2019-04-10 12:24:17 -07:00
046b7a1425 Expand codegen to allow specify guards and async 2019-04-10 15:43:18 +03:00
c22a3a71f2 fix test 2019-04-08 19:07:11 -07:00
9c9940d88d update readme 2019-04-08 17:53:19 -07:00
561f83d044 add upgrade service support to h1 dispatcher 2019-04-08 17:51:14 -07:00
43d325a139 allow to specify upgrade service 2019-04-08 14:51:16 -07:00
0a6dd0efdf fix compression tests 2019-04-08 12:48:39 -07:00
b921abf18f set host header for http1 connections 2019-04-08 12:48:26 -07:00
9bcd5d6664 updated legacy code in call_success example (#762) 2019-04-08 11:20:46 -07:00
bc58dbb2f5 add async expect service test 2019-04-08 11:19:56 -07:00
b1547bbbb6 do not set default headers 2019-04-08 11:09:57 -07:00
a7fdac1043 fix expect service registration and tests 2019-04-08 10:31:29 -07:00
53da55aa3c alpha4 release 2019-04-07 23:42:05 -07:00
aa78565453 use objects pool for HttpRequest; optimize nested services call 2019-04-07 23:06:21 -07:00
75b213a6f0 refactor FromRequest trait 2019-04-07 14:43:07 -07:00
3c650ca194 remove buffer capacity for payload 2019-04-07 10:40:45 -07:00
219baf3323 remove PayloadWriter trait 2019-04-07 10:29:26 -07:00
ec09d6fbe6 optimize encode headers and body split 2019-04-07 10:03:38 -07:00
68d2203dd6 run travis with stable rust only 2019-04-07 08:17:29 -07:00
748289f0ff use custom headers map; more optimizations 2019-04-06 15:02:02 -07:00
4ef46e26f9 Merge branch 'master' of github.com:actix/actix-web 2019-04-06 08:13:14 -07:00
3872d3ba5a refactor h1 dispatcher 2019-04-06 08:12:58 -07:00
b1523ab78c started 1.0 migration guide (#758) 2019-04-06 07:39:20 -07:00
fbedaec661 add expect: 100-continue support #141 2019-04-05 16:46:44 -07:00
02fcaca3da add backward compatibility 2019-04-05 11:36:26 -07:00
18593d8476 updated Connector docs and renamed service() to finish() (#757)
* added Connector to actix-web::client namespace

* updated Connector, renaming service() to finish() and adding docs

* added doc for finish method on Connector
2019-04-05 11:34:27 -07:00
b6dacaa23a remove SendError and SendResponse services 2019-04-05 11:29:42 -07:00
f89321fd01 fix import 2019-04-05 10:50:11 -07:00
0d4a8e1b1c update actix-connect 2019-04-05 10:35:14 -07:00
162cd3eecd added Connector to actix-web::client namespace (#756) 2019-04-05 07:37:00 -07:00
a655bdac52 Fix clippy warning (#755) 2019-04-05 12:34:24 +03:00
309c480782 encoder sent uncompressed data before compressed 2019-04-04 15:03:40 -07:00
9c205f9f1d update tests for content-encoding 2019-04-04 14:00:56 -07:00
1f5c0f50f9 Add minimal std::error::Error impl for Error 2019-04-04 13:23:38 -07:00
d8bc66a18e Use thread pool for response body compression 2019-04-04 13:17:55 -07:00
bc834f6a03 remove some static constraints 2019-04-04 10:59:34 -07:00
dc7c3d37a1 upgrade router 2019-04-03 21:45:30 -07:00
1e2bd68e83 Render error and return as response body 2019-04-03 19:55:19 -07:00
954fe21751 set response error body 2019-04-03 19:07:25 -07:00
7d6085ddbd Add %U (URLPath) for logger (#752)
* Add %R (Route) for logger

* Requested Updates (Route => URLPath, %R => %U)
2019-04-03 17:41:42 -07:00
cef3dc3586 added app_data() method 2019-04-03 15:25:52 -07:00
237bfba1ed add App::configure() - allow to offload app configuration to different methods 2019-04-03 15:09:31 -07:00
dfa0abf5a5 Export IntoHeaderValue 2019-04-03 12:44:47 -07:00
e738361e09 move multipart support to separate crate 2019-04-03 12:28:58 -07:00
f56072954b remove PayloadBuffer 2019-04-03 03:20:20 -07:00
2a89b995aa do not cleanup travis build 2019-04-02 21:56:38 -07:00
442f5057dd alpha.3 release 2019-04-02 21:49:31 -07:00
19eef36f8f Merge branch 'tarpaulin' 2019-04-02 21:11:03 -07:00
51d5006ccf Detect socket disconnection during protocol selection 2019-04-02 20:50:25 -07:00
3aebe09e5c travis 2019-04-02 19:21:22 -07:00
4227cddd30 fix dev dependencies 2019-04-02 15:00:10 -07:00
db1f7651a3 more patch cratesio 2019-04-02 14:47:59 -07:00
00000fb316 mut obj 2019-04-02 14:27:54 -07:00
f100976ef0 rename close_connection to force_close 2019-04-02 14:08:30 -07:00
deac983bc7 fix test-server workspace setup 2019-04-02 14:04:28 -07:00
bca31eb7ad remove Deref 2019-04-02 13:35:01 -07:00
e282ef7925 return back consuming builder 2019-04-02 12:51:16 -07:00
49a499ce74 properly allocate read buffer 2019-04-02 11:11:32 -07:00
d067b1d5f1 do not use static 2019-04-02 10:53:44 -07:00
c27fbdc35f Preallocate read buffer for h1 codec, #749 2019-04-02 10:19:56 -07:00
1bd0995d7a remove unneeded & 2019-04-01 18:00:38 -07:00
2d43489278 ClientRequest::json() accepts reference instead of object 2019-04-01 17:53:30 -07:00
89a0a50e14 Merge branch 'master' of github.com:actix/actix-web 2019-04-01 15:20:04 -07:00
38afc93304 Use non-consuming builder pattern for ClientRequest 2019-04-01 15:19:34 -07:00
03c84be1f2 Merge pull request #750 from Dowwie/master
added docs for wrap and wrap_fn
2019-04-01 17:37:04 -04:00
6d169f4c9c Merge branch 'master' of https://github.com/Dowwie/actix-web 2019-04-01 15:10:49 -04:00
3dd3f7bc92 updated scope wrap doc 2019-04-01 15:10:28 -04:00
e6936d9f73 Merge branch 'master' into master 2019-04-01 14:53:23 -04:00
03dfbdfcdd updated wrap and wrap fn descriptions, still requiring viable examples 2019-04-01 14:52:05 -04:00
5c4e4edda4 add ClientResponse::json() 2019-04-01 11:51:18 -07:00
c5fa6c1abe do not consume response 2019-04-01 11:29:50 -07:00
6c195d8521 add Deref<Target=RequestHead> for ClientRequest 2019-04-01 10:26:25 -07:00
96fd61f3d5 rust 1.31.0 compatibility 2019-04-01 10:26:09 -07:00
8800b8ef13 mentioned re-use in wrap doc 2019-04-01 09:59:21 -04:00
220c04b7b3 added docs for wrap and wrap_fn 2019-04-01 09:30:11 -04:00
34695f4bce rename test methods; update tests 2019-03-31 20:43:00 -07:00
15c5a3bcfb fix test 2019-03-31 18:57:54 -07:00
ab45974e35 add default handler 2019-03-31 18:19:18 -07:00
e4b3f79458 allocate enough space 2019-03-31 17:05:02 -07:00
ce8294740e fix tests with disabled features 2019-03-31 17:04:34 -07:00
ddf5089bff Warn when an unsealed private cookie isn't valid UTF-8 (#746) 2019-03-31 16:26:56 +03:00
7596d0b7cb fix fn_guard doc string 2019-03-30 20:48:00 -07:00
1a871d708e update guard doc test 2019-03-30 12:13:21 -07:00
351df84cca update stable release api doc link 2019-03-30 11:37:56 -07:00
6fcbe4bcda add fn_guard 2019-03-30 11:33:31 -07:00
457b75c995 update api docs; move web to submodule 2019-03-30 10:04:38 -07:00
724e9c2efb replace deprecated fn 2019-03-30 07:56:09 -07:00
2e159d1eb9 test-server: Request functions should accept path (#743) 2019-03-30 07:53:45 -07:00
a20b9fd354 prepare alpha2 release 2019-03-29 22:06:14 -07:00
d846328f36 fork cookie crate 2019-03-29 21:13:39 -07:00
193f8fb2d9 update tests 2019-03-29 18:51:07 -07:00
3a954298d7 Merge branch 'master' of github.com:actix/actix-web 2019-03-29 18:23:07 -07:00
3220777ff9 Added ws::Message::Nop, no-op websockets message 2019-03-29 18:22:49 -07:00
00526f60dc Impl BodyEncoding for Response (#740) 2019-03-29 16:29:11 -07:00
c126713f40 add rustls support to HttpServer 2019-03-29 16:28:32 -07:00
e9bbde6832 allow to override request's uri 2019-03-29 16:28:28 -07:00
5eb3f1154e revert 2019-03-29 14:27:22 -07:00
aebeb511cd explicit impl traits for ws connect 2019-03-29 14:26:11 -07:00
744d82431d add per request timeout 2019-03-29 14:07:37 -07:00
058b1d56e6 Export ws sub-module with websockets related types 2019-03-29 13:49:21 -07:00
709475b2bb multipart::Field renamed to MultipartField 2019-03-29 11:59:38 -07:00
19a0b8046b remove actix reference 2019-03-29 11:13:36 -07:00
1e7096a63a add request timeout 2019-03-28 22:33:41 -07:00
ea4d98d669 Session wide headers, basic and bearer auth 2019-03-28 21:48:35 -07:00
3b897da8e2 Do not use thread pool for decompression if chunk size is smaller than 2048 2019-03-28 21:15:26 -07:00
10b166404e Merge branch 'master' of github.com:actix/actix-web 2019-03-28 20:27:59 -07:00
80ff7d40a1 enable awc/ssl if ssl features is enabled 2019-03-28 20:27:47 -07:00
c4a8bbe47b fix the example in README.md (#739) 2019-03-28 20:03:17 -07:00
9710e9b01f Re-export actix_http::client::Connector 2019-03-28 14:46:33 -07:00
1d79f16529 update release api docs link 2019-03-28 14:30:38 -07:00
670a457013 fix docs.rs feature list 2019-03-28 14:28:59 -07:00
878f32c495 fix tests for no-default-features 2019-03-28 14:27:07 -07:00
a2c9ff3a33 back to development 2019-03-28 14:10:03 -07:00
9c198a0d29 alpha.1 release 2019-03-28 13:46:26 -07:00
9cca86e60d prepare actix-http release 2019-03-28 12:45:41 -07:00
605ce05127 App::enable_encoding() allows to enable compression and decompression 2019-03-28 12:32:59 -07:00
5795850bbb decompress payload in cpu threadpool 2019-03-28 11:08:24 -07:00
e84c95968f reuse PayloadBuffer from actix-http 2019-03-28 05:34:33 -07:00
6e0fe7db2d use actix-threadpool for blocking calls 2019-03-28 05:16:43 -07:00
4309d9b88c port multipart support 2019-03-28 05:04:39 -07:00
c59937784e add client websockets support 2019-03-27 18:53:19 -07:00
e254fe4f9c allow to override response body encoding 2019-03-27 11:29:31 -07:00
3edc515bac refactor RequestHead/ResponseHead 2019-03-27 10:38:01 -07:00
fb9c94c3e0 remove Backtrace from error 2019-03-27 09:31:07 -07:00
faa3ea8e5b rename BodyLength to BodySize 2019-03-27 09:24:55 -07:00
b6b37d3ea3 Add Client::request_from 2019-03-26 23:25:24 -07:00
b7570b2476 remove unused code 2019-03-26 22:33:01 -07:00
959aebb24f simplify TestResponse builder 2019-03-26 22:03:00 -07:00
d49a8ba53b add client TestResponse 2019-03-26 21:57:04 -07:00
5703bd8160 fix client cookies parsing 2019-03-26 21:31:18 -07:00
ab597dd98a Added HTTP Authentication for Client #540 2019-03-26 20:57:06 -07:00
1cca25c276 add client decompression support 2019-03-26 20:45:00 -07:00
2629699b62 rename flate2-c feature to flate2-zlib 2019-03-26 18:46:06 -07:00
1904b01fc0 add content-encoding decompression 2019-03-26 15:14:32 -07:00
9451ba71f4 update cargo files 2019-03-26 12:50:51 -07:00
50c0ddb3cd update tests 2019-03-26 12:31:51 -07:00
c7ad677804 Merge actix-http project 2019-03-26 11:54:35 -07:00
2c7da28ef9 move high level client code to awc crate 2019-03-26 11:43:22 -07:00
b254113d9f move high level client code from actix-http 2019-03-26 11:41:38 -07:00
999fa65efa Merge branch 'master' of github.com:actix/actix-web 2019-03-26 09:22:51 -07:00
cc24c77acc add Client::new() 2019-03-26 09:11:27 -07:00
254b61e800 Fix copy/paste mistake in error message (#733) 2019-03-26 09:07:19 -07:00
83d4447349 add http client 2019-03-25 21:58:01 -07:00
9037473e0f update client error 2019-03-25 21:52:45 -07:00
8d1195d8ac add async handler tests 2019-03-25 14:33:34 -07:00
e18227cc3d add wrap_fn to App and Scope 2019-03-25 13:43:02 -07:00
d30027ac5b Remove StaticFilesConfig (#731)
* Remove StaticFilesConfig

* Applying comments

* Impl Clone for Files<S>
2019-03-25 13:02:37 -07:00
86a21c956c rename .middleware to .wrap 2019-03-25 13:02:10 -07:00
939d2e745c rename Resource::middleware to Resource::wrap and add wrap_fn for fn middlewares 2019-03-25 12:47:58 -07:00
1970c99522 add session test 2019-03-24 20:21:20 -07:00
51e4dcf3b3 update test doc string 2019-03-24 17:13:17 -07:00
e37e81af0b simplify Payload extractor 2019-03-24 17:00:59 -07:00
ed322c175e update tests 2019-03-24 16:28:16 -07:00
b95e99a09e update changes 2019-03-24 16:17:59 -07:00
ffb3324129 do not use default resource from app, return 405 if no matching route found 2019-03-24 16:15:34 -07:00
9932a342ef export Scope 2019-03-24 11:59:35 -07:00
c1e8d8363c fix errhandlers doc string 2019-03-24 11:49:26 -07:00
913155d34c update doc strings 2019-03-24 11:47:23 -07:00
ede32c8b3f export errhandlers module 2019-03-24 11:32:30 -07:00
307b2e5b0e fix compress features 2019-03-24 11:29:35 -07:00
bc01d39d4d add error response test for cors 2019-03-23 22:03:40 -07:00
548f6f89bf allow to get app data via HttpRequest 2019-03-23 21:39:02 -07:00
5b06f2bee5 port cors middleware 2019-03-23 21:29:16 -07:00
60050307bd session feature is renamed to cookies 2019-03-23 11:18:31 -07:00
1e069bb843 Merge branch '1.0' 2019-03-23 10:16:32 -07:00
535b407ac0 make cookies optional 2019-03-23 10:06:54 -07:00
c5c7b244be cookie is optional 2019-03-23 09:40:20 -07:00
00b7dc7887 handle socket shutdown for h1 connections 2019-03-18 09:44:48 -07:00
8872f3b590 fix ws upgrade 2019-03-18 05:30:18 -07:00
fd86d73a03 fix response upgrade type 2019-03-18 05:26:12 -07:00
efe3025395 add handshake test 2019-03-17 22:57:27 -07:00
3301a46264 proper connection upgrade check 2019-03-17 22:56:13 -07:00
b0343eb22d simplify ws stream interface 2019-03-17 22:31:10 -07:00
6ab7665868 export ws module 2019-03-17 22:11:50 -07:00
fd3e351c31 add websockets context 2019-03-17 22:02:03 -07:00
f26d4b6a23 do not chunk websocket stream 2019-03-17 21:57:53 -07:00
85c2887b30 export ws::hash_key 2019-03-17 21:09:50 -07:00
88152740c6 move macros tests to codegen crate 2019-03-17 20:20:10 -07:00
a07ea00cc4 add basic test for proc macro 2019-03-17 13:55:03 -07:00
6b66681827 add basic actors integration 2019-03-17 13:47:20 -07:00
9bd0f29ca3 add tests for error and some responders 2019-03-17 10:11:10 -07:00
c14c66d2b0 add json extractor tests 2019-03-17 09:52:41 -07:00
2b5f9f0511 temp fix for tarpaulin 2019-03-17 08:52:03 -07:00
7435c5e9bf temp fix for tarpaulin 2019-03-17 01:49:00 -07:00
b550f9ecf4 update imports 2019-03-17 01:08:56 -07:00
fa66a07ec5 move httpmessage futures to actix-web 2019-03-17 01:02:51 -07:00
9012c46fe1 move payload futures from actix-http 2019-03-17 00:48:40 -07:00
fd141ef9b1 move json to actix-web 2019-03-16 22:10:15 -07:00
c80884904c move JsonBody from actix-http 2019-03-16 22:04:09 -07:00
725ee3d396 rename extract to types 2019-03-16 21:43:48 -07:00
4a4826b23a cleanup doc strings and clippy warnings 2019-03-16 21:35:02 -07:00
e396c90c9e update api doc 2019-03-16 21:13:16 -07:00
60386f1791 introduce RouteData extractor 2019-03-16 21:09:11 -07:00
6afcecdb5f Merge branch '1.0' of github.com:actix/actix-web into 1.0 2019-03-16 20:17:48 -07:00
b1e267bce4 rename State to a Data 2019-03-16 20:17:27 -07:00
d93fe157b9 use better name Route::data instead of Route::config 2019-03-16 11:58:01 -07:00
414614e1b5 change macro import (#727) 2019-03-15 21:08:39 -07:00
ce4a2629f3 update actix-connect 2019-03-14 22:56:06 -07:00
15ba40d3ab fix non ssl connector 2019-03-14 13:08:05 -07:00
76bb30dc3a fix names 2019-03-14 13:06:29 -07:00
1f9467e880 update tests 2019-03-14 12:01:35 -07:00
b8bfd29d2c use Uri as client connect message 2019-03-14 11:52:52 -07:00
bf8262196f feat: enable use of patch as request method (#718) 2019-03-14 11:36:10 +03:00
d2c755bb47 update client connector 2019-03-13 22:57:28 -07:00
3a24a75d13 update dep 2019-03-13 16:56:11 -07:00
033a8d890c update actix connect 2019-03-13 15:57:33 -07:00
1941aa0217 use actix-connect crate 2019-03-13 14:41:40 -07:00
17ecdd63d2 httpresponse: add constructor for HttpResponseBuilder (#697) 2019-03-13 17:20:18 +03:00
86405cfe7a more tests 2019-03-12 22:57:09 -07:00
28f01beaec update deps 2019-03-12 17:06:08 -07:00
f627d01055 update actix-server 2019-03-12 17:04:08 -07:00
e271d4e47a Merge branch 'master' of github.com:actix/actix-http 2019-03-12 16:55:38 -07:00
402a40ab27 update actix-server dep 2019-03-12 16:55:16 -07:00
7242d96701 map BlockingError 2019-03-11 23:19:05 -07:00
a2c4639074 move blocking code to actix-rt 2019-03-11 23:11:51 -07:00
00d47acedc Update README.md 2019-03-11 17:56:48 -07:00
409888fcd5 remove debug print, remove unused flags 2019-03-11 16:47:12 -07:00
e15e4f18fd update tests 2019-03-11 16:42:33 -07:00
eae48f9612 use server backlog 2019-03-11 15:26:05 -07:00
ad43ca735b update server service requirements 2019-03-11 15:09:42 -07:00
cc7f6b5eef Fix preflight CORS header compliance; refactor previous patch. (#717) 2019-03-11 07:26:54 +03:00
9680423025 Add more tests for route 2019-03-10 18:35:15 -07:00
b8829bbf22 add identity middleware tests 2019-03-10 17:16:32 -07:00
0f0d6b65ca update service request/response location 2019-03-10 17:16:28 -07:00
6436004194 set test cookie if it is not empty 2019-03-10 17:06:43 -07:00
50a0cb5653 do not move self 2019-03-10 17:02:14 -07:00
615fbb49bd support cookies in TestRequest 2019-03-10 17:00:03 -07:00
4d96abb639 use actix_web::Error for middleware errors 2019-03-10 16:35:38 -07:00
d755772039 add From impls for ResponseBuilder 2019-03-10 15:30:31 -07:00
79875ea039 update deps 2019-03-10 14:22:53 -07:00
039efc5703 move tests to different mods 2019-03-10 11:04:50 -07:00
b6c1135798 hide blocking mod 2019-03-10 10:56:53 -07:00
16c42be4a2 simplify extractor configuration, config is optional now 2019-03-10 10:53:56 -07:00
ee8725b581 move extractors to separate submod 2019-03-10 10:01:24 -07:00
49d65fb07a move extract to submodule 2019-03-10 09:34:25 -07:00
9b8812423c reexport Server controller from actix-server 2019-03-10 09:20:58 -07:00
3a2035a121 fix doc tests 2019-03-09 21:15:26 -08:00
be9031c55e update doc api 2019-03-09 20:48:05 -08:00
12f0c78091 port identity middleware 2019-03-09 20:40:09 -08:00
134863d5c8 move middlewares 2019-03-09 18:05:50 -08:00
513ce0b08d add json and form client request's method 2019-03-09 17:42:35 -08:00
85664cc6f7 update deps 2019-03-09 14:56:18 -08:00
6c4be45787 update deps 2019-03-09 14:33:33 -08:00
d2dba028f6 fix dependency link 2019-03-09 14:07:43 -08:00
54678308d0 propagate app config with http request; add tests for url_for 2019-03-09 14:06:24 -08:00
9c7056e9b8 fix connector 2019-03-09 13:38:56 -08:00
c0ce7f0bae update http service usage; add app host 2019-03-09 10:53:00 -08:00
d026821924 unify service builders 2019-03-09 10:39:06 -08:00
fde55ffa14 revert generic request parameter for service; support ServerConfig as new factory config 2019-03-09 09:49:11 -08:00
aadcdaa3d6 add resource map, it allow to check if router has resource and it allows to generate urls for named resources 2019-03-09 07:39:34 -08:00
ca73f178c9 revert generic service request; add ServerConfig to service factories 2019-03-09 07:37:23 -08:00
e324522389 listen method has different signature 2019-03-08 22:47:49 -08:00
2f6df11183 do not execute blocking fn if result is not required 2019-03-07 19:31:17 -08:00
eef687ec80 remove unneeded methods 2019-03-07 15:51:24 -08:00
88e5059910 add doc string to guards 2019-03-07 15:37:39 -08:00
b6b2eadb3a rename blocking fn 2019-03-07 14:41:43 -08:00
c2a350b33f export blocking via web module 2019-03-07 14:40:20 -08:00
0e57b4ad61 export extractor configs via web module 2019-03-07 14:01:52 -08:00
b211966c28 Payload extractor 2019-03-07 13:33:40 -08:00
d77954d19e fix files test 2019-03-07 12:32:40 -08:00
ceb6d45bf2 reexport extractors in web module 2019-03-07 11:43:46 -08:00
22708e78a9 added proc-macros for route registration 2019-03-07 11:09:42 -08:00
1151b5bf7c fix crate name 2019-03-06 23:43:47 -08:00
e56691bcf2 rename to Files 2019-03-06 23:39:08 -08:00
6e638129c5 use generic HttpService 2019-03-06 23:06:14 -08:00
6d639ae3df allow to create http services with config 2019-03-06 22:59:56 -08:00
3b069e0568 added combined http1/2 service 2019-03-06 22:56:34 -08:00
e25483a0d5 fix warnings 2019-03-06 21:12:35 -08:00
60c048c8cd fix nested resources 2019-03-06 19:27:18 -08:00
244fff9e0a added Logger middleware 2019-03-06 19:19:27 -08:00
561a89b8b3 copy logger 2019-03-06 17:33:12 -08:00
fe22e83144 refactor service registration process; unify services and resources 2019-03-06 15:47:15 -08:00
b689bb9260 add failure support 2019-03-06 11:45:33 -08:00
5cde4dc479 update actix-rt 2019-03-06 10:41:07 -08:00
ad08e856d7 update actix-rt 2019-03-06 10:30:17 -08:00
db39a604ae implement ResponseError trait for BlockingError 2019-03-06 10:03:37 -08:00
db566a634c make State type Send compatible 2019-03-06 10:03:18 -08:00
3fc28c5d07 simplify StaticFile constructor, move HttpRange to separate module 2019-03-06 09:27:02 -08:00
ceca96da28 Added HTTP Authentication for Client (#540) 2019-03-06 12:56:12 +03:00
6efc3438b8 refactor and enable some tests for staticfiles 2019-03-05 22:10:08 -08:00
889d67a356 add Stream impl for ResponseBody 2019-03-05 21:19:12 -08:00
34c8b95a35 allow to extract body from response 2019-03-05 21:15:18 -08:00
1a80b70868 add Responder impl for InternalError 2019-03-05 19:41:50 -08:00
0de47211b2 tune App::default_resource signature 2019-03-05 19:30:44 -08:00
f71354783e update HttpMessage impls 2019-03-05 19:10:45 -08:00
d85468f7e1 do not expose headers_mut via HttpMessage 2019-03-05 19:07:07 -08:00
81273f71ef update tests 2019-03-05 19:03:59 -08:00
0cf73f1a04 move session to different folder 2019-03-05 18:52:29 -08:00
143ef87b66 add session and cookie session backend 2019-03-05 18:47:18 -08:00
496ee8d039 remove more MessageBody constraints from Response 2019-03-05 18:14:30 -08:00
0d2116156a Messagebody constraint is not required from Response::into_body 2019-03-05 17:24:24 -08:00
200cae19a9 add HttpMessage impl &mut T 2019-03-05 14:39:06 -08:00
96477d42cb extend HttpMessage trait, add api to work with requests cookies 2019-03-05 13:16:26 -08:00
01329af1c2 fix non ssl code 2019-03-05 10:18:46 -08:00
6457996cf1 move session to separate crate 2019-03-05 10:13:33 -08:00
03248028a9 update actix-service 2019-03-05 10:08:08 -08:00
ce0b172598 update actix-service 2019-03-05 09:30:11 -08:00
b6fe1dacf2 update middleware impl 2019-03-04 21:42:51 -08:00
42f030d3f4 Ensure that Content-Length zero is specified in empty request 2019-03-05 08:37:15 +03:00
3a456ec148 update actix-service dependency 2019-03-04 20:46:33 -08:00
65a313c78b update utils dep 2019-03-04 19:51:09 -08:00
2e79562c9d add HttpServer type 2019-03-04 16:29:03 -08:00
a88b3b090d allow to specify service config for h1 service 2019-03-04 15:58:39 -08:00
bd4124587a provide block_on function for testing purpose 2019-03-04 13:25:35 -08:00
e442ddb167 allow scope level guards 2019-03-04 11:47:53 -08:00
5c61321565 fix state factory support, tests for state and state factory 2019-03-03 21:40:03 -08:00
34171fa7f5 add scopes 2019-03-03 21:02:01 -08:00
8502c32a3c re-enable extractor tests 2019-03-03 15:32:47 -08:00
360082f99f update api docs 2019-03-03 14:45:56 -08:00
e50d4c5e0e rename extractor module to extract, re-enable doc tests 2019-03-03 13:53:31 -08:00
237677be15 rename filter to guard 2019-03-03 12:09:38 -08:00
b81ae899f6 better naming 2019-03-03 08:24:09 -08:00
015364edf8 fix travis config 2019-03-03 08:00:12 -08:00
f90ca868ca update tests 2019-03-03 01:12:06 -08:00
a8f3dec527 use tarpaulin from cache 2019-03-03 01:03:28 -08:00
6df85e32df added extractor configuration system 2019-03-03 00:57:48 -08:00
08fcb6891e use specific nightly version for travis 2019-03-02 22:33:46 -08:00
b320dc127a remove unused code 2019-03-02 22:22:45 -08:00
115b30d9cc add state example 2019-03-02 22:11:24 -08:00
d5c54a1867 update extractor tests 2019-03-02 22:03:45 -08:00
352e7b7a75 update tests for defaultheaders middleware 2019-03-02 21:35:31 -08:00
b535adf637 add IntoFuture impl for Response and ResponseBuilder 2019-03-02 21:22:01 -08:00
8103d33270 use custom request for FromRequest trait 2019-03-02 19:19:56 -08:00
2d0495093c add Payload::take method 2019-03-02 18:37:09 -08:00
e4198a037a add TestServiceRequest builder 2019-03-02 16:24:14 -08:00
00ea195601 TestRequest::take public 2019-03-02 16:04:43 -08:00
0081b9d446 improve ergonomics of TestRequest 2019-03-02 15:59:05 -08:00
de9b38295f update deps 2019-03-02 15:08:10 -08:00
9394a4e2a5 cleanup dependencies 2019-03-02 14:07:21 -08:00
3454812b68 rename actix-web-fs crate 2019-03-02 13:59:12 -08:00
75fbb97480 update new transform trait 2019-03-02 13:57:00 -08:00
cc20fee628 add request chain services 2019-03-02 11:53:05 -08:00
fdf3011837 add responder for unit type 2019-03-02 09:05:07 -08:00
bc3c29c398 update version 2019-03-02 00:04:39 -08:00
e6d04d24cc move fs to separate crate 2019-03-01 23:59:44 -08:00
2d7293aaf8 copy actix-web2 2019-03-01 22:51:32 -08:00
5fff07402e downgrade tarpaulin 2019-03-01 21:36:37 -08:00
650474ca39 choose openssl version for travis 2019-03-01 21:02:56 -08:00
38c86d4683 update tarpaulin travis config 2019-03-01 20:33:31 -08:00
6d11ee683f fixing little typo in docs (#711) 2019-03-01 11:34:58 +03:00
80d4cbe301 Add change notes for new HttpResponseBuilder 2019-02-27 21:37:20 +03:00
69d710dbce Add insert and remove() to response builder (#707) 2019-02-27 15:52:42 +03:00
b80ee71785 use new new service api 2019-02-22 14:21:35 -08:00
2f89b12f4f remove more response constraints 2019-02-20 21:05:37 -08:00
60a8da5c05 remove Response constraint 2019-02-20 21:02:23 -08:00
7f749ac9cc add missing end of line 2019-02-18 22:34:22 -08:00
781f1a3fef do not skip content length if no chunking is selected 2019-02-18 22:20:00 -08:00
c8713d045c poll payload again if framed object gets flushed during same iteration 2019-02-18 21:41:38 -08:00
842da939dc fix chunked transfer encoding handling 2019-02-18 20:24:50 -08:00
d180b2a1e3 update tests 2019-02-18 18:46:30 -08:00
037c3da172 enable ssl for connector 2019-02-18 18:40:40 -08:00
e6e83ea57e add Response::map_body 2019-02-18 17:01:35 -08:00
118606262b refactor payload handling 2019-02-13 13:52:11 -08:00
0059a55dfb Fix typo 2019-02-13 14:31:28 +03:00
8d4ce0c956 export PayloadStream 2019-02-12 11:09:58 -08:00
a41459bf69 make payload generic 2019-02-12 11:07:42 -08:00
32021532c3 export Payload type 2019-02-12 09:55:29 -08:00
f9724fa0ec add ErrorResponse impl for TimeoutError 2019-02-11 09:54:41 -08:00
e178db7f74 fix test 2019-02-09 21:32:44 -08:00
1af149b9e6 remove Clone constraint from handler service 2019-02-09 20:27:39 -08:00
a66d8589c2 add Extensions::contains method 2019-02-09 10:45:35 -08:00
6a343fae06 simplify Message type 2019-02-09 10:33:49 -08:00
f3ed1b601e Change service response to Into<Response> 2019-02-09 08:44:22 -08:00
c695358bcb Ignored the If-Modified-Since if If-None-Match is specified (#680) (#692) 2019-02-09 00:33:00 +03:00
ed7ca7fe07 make Message clonable and expose as public 2019-02-07 21:50:20 -08:00
b0e36fdcf9 simplify Message api 2019-02-07 21:19:10 -08:00
a7a2d4cf5c fix warns 2019-02-07 19:53:48 -08:00
2a6e4dc7ab use non mutable self for HttpMessage::payload() for ergonomic reasons 2019-02-07 19:26:12 -08:00
5575ee7d2d use same payload type for h1 and h2 2019-02-07 13:41:50 -08:00
7d49a07f91 add h1/h2 payload 2019-02-07 13:39:15 -08:00
c4596b0bd6 add headers from actix-web 2019-02-07 13:24:24 -08:00
cd83553db7 simplify payload api; add missing http error helper functions 2019-02-07 11:37:33 -08:00
b018e4abaf Fixes TestRequest::with_cookie panic 2019-02-07 07:55:27 +03:00
fcace161c7 fix manifest features 2019-02-06 12:22:40 -08:00
55a29d3778 add h2 server support 2019-02-06 11:44:15 -08:00
346d85a884 Serve static file directly instead of redirecting (#676) 2019-02-04 13:20:46 +03:00
ef5b54a481 use released service crate 2019-02-03 14:05:44 -08:00
c9bb2116fe update actix-utils 2019-02-03 10:50:29 -08:00
e70c7f2a5d upgrade derive-more 2019-02-01 20:22:43 -08:00
3269e35722 migrate to actix-service 0.2 2019-02-01 20:18:44 -08:00
76866f054f move service to submodule; update travis config 2019-01-30 10:29:15 -08:00
3e6bdbd9ee rename trait 2019-01-29 10:34:27 -08:00
9a4eb5a848 update readme 2019-01-29 10:17:38 -08:00
4217894d48 cleanup warnings 2019-01-29 10:14:00 -08:00
4a388d7ad9 add client http/2 support 2019-01-28 20:41:09 -08:00
9968afe4a6 Use NamedFile with an existing File (#670) 2019-01-28 08:07:28 +03:00
12fb94204f use hashbrown instead of std HashMap 2019-01-27 11:42:41 -08:00
c3d3e8b465 move TestServer to separate crate 2019-01-27 11:07:48 -08:00
42277c5c8f update deps 2019-01-26 22:09:26 -08:00
f5bec968c7 Bump v_htmlescape version to 0.4 2019-01-25 11:31:42 +03:00
a534fdd125 Add io handling for ECONNRESET when data has already been received 2019-01-20 08:45:33 +03:00
3431fff4d7 Fixed example in client documentation. This closes #665. 2019-01-14 07:44:30 +03:00
d6df2e3399 Fix HttpResponse doc spelling "os" to "of" 2019-01-11 08:45:15 +03:00
1fbb52ad3b 0.7.18 Bump 2019-01-10 17:05:18 +03:00
e5cdd22720 Fix test server listener thread leak (#655) 2019-01-08 10:42:22 -08:00
4f2e970732 Tidy up CHANGES.md 2019-01-08 10:49:03 +03:00
4d45313f9d Decode special characters when handling static files 2019-01-08 10:46:58 +03:00
55a2a59906 Improve change askama_escape in favor of v_htmlescape (#651) 2019-01-03 22:34:18 +03:00
61883042c2 Add with-cookie init-method for TestRequest (#647) 2019-01-02 13:24:08 +03:00
799c6eb719 0.7.17 Bump 2018-12-25 16:28:36 +03:00
037a1c6a24 Bump min version of rustc
Due to actix & trust-dns requirement
2018-12-24 21:17:09 +03:00
bfdf762062 Only return a single Origin value (#644)
Only return a single origin if matched.
2018-12-24 21:16:07 +03:00
477bf0d8ae Send HTTP/1.1 100 Continue if request contains expect: continue header #634 2018-12-23 10:19:12 -08:00
e9fe3879df Support custom content types in JsonConfig 2018-12-23 08:27:47 +03:00
f2251b8059 Merge branch 'master' of github.com:fafhrd91/actix-http 2018-12-19 18:35:09 -08:00
cc74435b01 drop failure crate 2018-12-19 18:34:56 -08:00
3bd5167ac2 Merge pull request #10 from DoumanAsh/http1_case
H1 decoder should ignore headers case
2018-12-16 07:44:40 -08:00
67df9399df H1 decoder should ignore headers case 2018-12-16 18:43:11 +03:00
1a940d4c18 H1 decoder should ignore header cases 2018-12-16 18:34:32 +03:00
e8bdcb1c08 Update min version of http
Closes #630
2018-12-15 09:26:56 +03:00
b1001b80b7 upgrade actix-service dependency 2018-12-12 18:39:01 -08:00
46db09428c Prepare release 0.7.16 2018-12-11 21:04:05 +03:00
1c60992723 use released crates 2018-12-11 09:29:12 -08:00
90eef31cc0 impl ResponseError for SendError when possible (#619) 2018-12-11 19:37:52 +03:00
aaae368ed9 use new actix crates 2018-12-10 18:08:33 -08:00
86af02156b add impl FromRequest for Either<A,B> (#618) 2018-12-10 19:02:05 +03:00
9f4d48f7a1 update tests 2018-12-06 15:03:01 -08:00
e9121025b7 convert to 2018 edition 2018-12-06 14:32:52 -08:00
ac9fc662c6 Bump version to 0.7.15 2018-12-05 18:27:06 +03:00
0745a1a9f8 Remove usage of upcoming keyword async
AsyncResult::async is replaced with AsyncResult::future
2018-12-05 18:23:04 +03:00
b1635bc0e6 Update some dependencies (#612)
* Update rand to 0.6

* Update parking_lot to 0.7

* Update env_logger to 0.6
2018-12-04 09:58:22 +03:00
08c7743bb8 Add set_mailbox_capacity() function 2018-12-02 08:40:09 +03:00
68c5d6e6d6 impl From<Cow<'static, [u8]>> for Binary (#611)
impl `From<Cow<'static, [u8]>>` and `From<Cow<'static, str>>` for `Binary`
2018-12-02 08:32:55 +03:00
c0f8bc9e90 fix ssl support 2018-11-30 16:04:33 -08:00
5003c00efb use new Service and NewService traits 2018-11-30 11:57:57 -08:00
d269904fbf add cause for nested errors 2018-11-28 09:10:13 -10:00
06387fc778 display parse error for ws client errors 2018-11-28 09:02:31 -10:00
617b8557e1 Merge pull request #9 from alishir/new-example
echo example with `impl Future`
2018-11-27 20:17:32 -10:00
4028f6f6fd http crate removed, cargo fmt 2018-11-28 09:42:04 +03:30
397804a786 echo example with impl Future 2018-11-28 09:15:08 +03:30
756bf0af58 Merge pull request #5 from alishir/echo-example
Two other simple examples.
2018-11-25 20:50:17 -10:00
f1bfdc7395 Merge branch 'master' of https://github.com/fafhrd91/actix-http 2018-11-25 20:14:58 -10:00
9c038ee189 allow to use Uri for client request 2018-11-25 20:14:42 -10:00
ca1b460924 comments aligned. 2018-11-25 05:48:33 +03:30
c386353337 decode reserved characters when extracting path with configuration (#577)
* decode reserved characters when extracting path with configuration

* remove useless clone

* add a method to get decoded parameter by name
2018-11-24 16:54:11 +03:00
7a97de3a1e update readme. 2018-11-24 17:17:34 +03:30
d5b2640342 add framed_hello.rs 2018-11-24 17:08:17 +03:30
c3c2286e3a Another hello world example and update sample in README.md 2018-11-24 17:07:30 +03:30
d5ca6e21e2 simple echo server. 2018-11-24 11:29:14 +03:30
9aab382ea8 Allow user to provide addr to custom resolver
We basically swap Addr with Recipient to enable the user to use a custom resolver
2018-11-23 15:36:12 +03:00
7ef6eab9af Merge pull request #3 from alishir/examples
hello-world example added.
2018-11-22 19:24:50 -10:00
41d68c87d9 hello-world example added. 2018-11-23 07:42:40 +03:30
1a322966ff handle response errors 2018-11-21 07:49:24 -08:00
389cb13cd6 Export PathConfig and QueryConfig
Closes #597
2018-11-20 23:06:38 +03:00
ab3e12f2b4 set server response version 2018-11-20 11:23:05 -08:00
186d3d727a add keep-alive tests 2018-11-20 10:55:50 -08:00
6a93178479 Complete error helper functions. 2018-11-20 08:07:46 +03:00
e1fc6dea84 restore execute method 2018-11-19 16:39:40 -08:00
6b60c9e230 add debug impl for H1ServiceResult 2018-11-19 16:11:58 -08:00
3901239128 unify request/response encoder 2018-11-19 14:57:12 -08:00
1ca6b44bae add TestServer 2018-11-18 21:48:20 -08:00
18fcddfd63 remove backtrace dep 2018-11-18 20:25:59 -08:00
7d66430324 move url module to different crate 2018-11-18 20:08:43 -08:00
22d4523c93 update actix-net 2018-11-18 18:31:44 -08:00
7d3adaa6a8 replace message flags with ConnectionType 2018-11-18 18:17:38 -08:00
adad203314 refactor encoder/decoder impl 2018-11-18 17:52:56 -08:00
8fea1367c7 re-introduce Body type, use Body as default body type for Response 2018-11-18 13:48:42 -08:00
7fed50bcae refactor response body management 2018-11-17 20:21:28 -08:00
e73a97884a do not allow to set server response version 2018-11-17 09:03:35 -08:00
f0bd4d868e simplify server response type 2018-11-17 08:56:40 -08:00
3a4b16a6d5 use BodyLength for request and response body 2018-11-16 21:30:37 -08:00
aa20e2670d refactor h1 dispatcher 2018-11-16 21:09:33 -08:00
625469f0f4 refactor decoder 2018-11-16 19:28:07 -08:00
3b7bc41418 use RequestHead for Request 2018-11-15 22:34:29 -08:00
6d9733cdf7 define generic client Connection trait 2018-11-15 11:10:23 -08:00
acd42f92d8 remove debug print 2018-11-14 19:08:52 -08:00
6e7560e287 SendResponse service sends body as well 2018-11-14 18:57:58 -08:00
cd9901c928 prepare release 2018-11-14 16:24:01 -08:00
03ad9a3105 simplify client decoder 2018-11-14 10:52:40 -08:00
6297fe0d41 refactor client response payload handling 2018-11-14 09:38:16 -08:00
550c5f55b6 add simple http client 2018-11-13 22:59:00 -08:00
537144f0b9 add http client connector service 2018-11-11 23:12:54 -08:00
1ef0eed0bd do not stop on keep-alive timer if sink is not completely flushed 2018-11-08 20:46:13 -08:00
b25b083866 do not stop on keep-alive timer if sink is not completely flushed 2018-11-08 20:45:48 -08:00
dea39030bc properly handle upgrade header if content-length header is set 2018-11-08 20:38:40 -08:00
61b1030882 Fix websockets connection drop if request contains content-length header #567 2018-11-08 20:35:47 -08:00
7065c540e1 set nodelay on socket #560 2018-11-08 16:29:43 -08:00
aed3933ae8 Merge branch 'master' of github.com:actix/actix-web 2018-11-08 16:15:45 -08:00
5b7740dee3 hide ChunkedReadFile 2018-11-08 16:12:16 -08:00
1a0bf32ec7 Fix unnecessary owned string and change htmlescape in favor of askama_escape (#584) 2018-11-08 16:08:06 -08:00
9ab586e24e update actix-net dep 2018-11-08 16:06:23 -08:00
6a1d560f22 fix keep-alive timer reset 2018-11-08 09:30:53 -08:00
62f1c90c8d update base64 dep 2018-11-07 21:18:40 -08:00
2677d325a7 fix keep-alive timer reset 2018-11-07 21:09:33 -08:00
8e354021d4 Add SameSite option to identity middleware cookie (#581) 2018-11-07 23:24:06 +03:00
f1587243c2 fix body decoding 2018-11-05 19:32:03 -08:00
3b536ee96c Use old clippy attributes syntax (#562) 2018-11-01 11:14:48 +03:00
da82e24954 render error message as body 2018-10-30 11:55:17 -07:00
79bcbb8a10 use error message 2018-10-30 11:50:30 -07:00
148cf73003 allow to create response with error message 2018-10-30 11:46:44 -07:00
c2540cc59b clippy warnings 2018-10-29 16:39:46 -07:00
cfd9a56ff7 Add async/await ref 2018-10-28 09:24:19 -07:00
5f91f5eda6 Correct IoStream::set_keepalive for UDS (#564)
Enable uds feature in tests
2018-10-26 10:59:06 +03:00
540ad18432 add Debug impl 2018-10-24 16:48:45 -07:00
cd0223e8b7 update Connector usage 2018-10-23 22:41:30 -07:00
bc6e62349c update deps; export api 2018-10-23 21:44:20 -07:00
4260692034 add DefaultClient type alias 2018-10-22 18:52:40 -07:00
09c94cb06b add client http codec; websockets client 2018-10-22 18:18:05 -07:00
9b94eaa6a8 ws services 2018-10-22 09:59:20 -07:00
42d5d48e71 add a way to configure error treatment for Query and Path extractors (#550)
* add a way to configure error treatment for Query extractor

* allow error handler to be customized for Path extractor
2018-10-20 06:43:43 +03:00
960274ada8 Refactoring of server output to not exclude HTTP_10 (#552) 2018-10-19 07:52:10 +03:00
f383f618b5 Fix typo in error message (#554) 2018-10-18 21:27:31 +03:00
20c693b39c rename service 2018-10-15 16:46:13 -07:00
3c402a55da added H1SimpleService 2018-10-15 15:56:47 -07:00
c04b4678f1 bump version 2018-10-14 08:10:41 -07:00
dd948f836e HttpServer not sending streamed request body on HTTP/2 requests #544 2018-10-14 08:08:12 -07:00
d39c018c93 do not handle upgrade and connect requests 2018-10-13 23:57:31 -07:00
63a443fce0 Correct build script 2018-10-13 10:05:21 +03:00
d145136e56 Add individual check for TLS features 2018-10-13 09:54:03 +03:00
b960b5827c export Uri 2018-10-11 20:15:10 -07:00
32145cf6c3 fix after update tokio-rustls (#542) 2018-10-11 11:05:07 +03:00
06addd5523 update deps 2018-10-10 13:23:25 -07:00
47b47af01a refactor ws codec 2018-10-10 13:20:00 -07:00
ec8aef6b43 update dep versions 2018-10-10 08:36:16 -07:00
f45038bbfe remove unused code 2018-10-09 13:23:37 -07:00
c63838bb71 fix 204 support for http/2 2018-10-09 13:12:49 -07:00
4d17a9afcc update version 2018-10-09 11:42:52 -07:00
65e9201b4d Fixed panic during graceful shutdown 2018-10-09 11:35:57 -07:00
4a167dc89e update readme example 2018-10-09 10:47:41 -07:00
1407bf4f7f simplify h1 codec messages 2018-10-09 10:39:55 -07:00
c3ad516f56 disable shutdown atm 2018-10-09 09:45:24 -07:00
cb78d9d41a use actix-net release 2018-10-08 22:04:53 -07:00
93b1c5fd46 update deps 2018-10-08 21:58:37 -07:00
fd5da5945e update appveyor config 2018-10-08 21:23:52 -07:00
2b4870e65b fix tests on stable 2018-10-08 16:10:07 -07:00
f99a723643 add Default impl for ServiceConfig 2018-10-08 15:52:12 -07:00
3984ad45df separate ResponseLength::Zero is not needed 2018-10-08 15:33:38 -07:00
4e7fac08b9 do not override content-length header 2018-10-08 15:30:59 -07:00
805e7a4cd0 impl response body support 2018-10-08 15:24:51 -07:00
07f6ca4b71 Merge branch 'master' of github.com:actix/actix-web 2018-10-08 13:06:49 -07:00
03d988b898 refactor date rendering 2018-10-08 10:16:19 -07:00
431e33acb2 add Date header to response 2018-10-08 10:14:29 -07:00
30db78c19c use TakeItem instead of TakeRequest 2018-10-08 07:55:01 -07:00
cfad5bf1f3 enable slow request timeout for h2 dispatcher 2018-10-08 07:47:42 -07:00
8acf9eb98a better keep-alive handling 2018-10-07 10:09:48 -07:00
13193a0721 refactor http/1 dispatcher 2018-10-07 09:59:40 -07:00
9c4a55c95c simplify H1Service configuration 2018-10-07 08:28:38 -07:00
8d85c45c1d simplify error handling 2018-10-07 00:04:38 -07:00
b0ca6220f0 refactor te encoding 2018-10-06 22:36:57 -07:00
dda5b399ca add content-length test 2018-10-06 21:32:01 -07:00
25af82c45a cleanup dependencies 2018-10-06 21:17:27 -07:00
87b83a3403 update tests, remove unused deps 2018-10-06 21:07:32 -07:00
c368abdf5f remove Json type 2018-10-06 20:34:19 -07:00
ee62814216 split request decoder and payload decoder 2018-10-06 20:31:22 -07:00
10678a22af test content length (#532) 2018-10-06 08:17:20 +03:00
7ae5a43877 httpresponse.rs doc fix (#534) 2018-10-06 08:16:12 +03:00
c0699a070e add TakeRequest service; update ws test case 2018-10-05 15:40:56 -07:00
7e135b798b add websocket transport and test 2018-10-05 14:30:40 -07:00
5c0a2066cc refactor ws to a websocket codec 2018-10-05 12:47:22 -07:00
8c2244dd88 rename HttpResponse 2018-10-05 11:04:59 -07:00
d53f3d7187 re-enable websockets 2018-10-05 10:20:49 -07:00
2e27d77740 fix connection keepalive support 2018-10-05 10:03:10 -07:00
fbf67544e5 remove unused code 2018-10-05 08:03:25 -07:00
c24a8f4c2d remove high level apis 2018-10-05 07:02:09 -07:00
c99f9eaa63 Update test_h1v2.rs 2018-10-05 05:59:02 -07:00
caa5a54b8f fix test and remove unused code 2018-10-04 23:46:43 -07:00
7fdc18f9b9 calculate response parameters 2018-10-04 23:39:11 -07:00
e78014c65a fix travis link in readme 2018-10-04 21:19:43 -07:00
df50e636f1 update readme 2018-10-04 21:18:36 -07:00
99a915e668 disable gh-pages update 2018-10-04 21:15:24 -07:00
829dbae609 cleanups and tests 2018-10-04 21:14:18 -07:00
4ca711909b refactor types 2018-10-04 20:02:10 -07:00
b15b2dda22 remove ServerSettings 2018-10-04 17:34:57 -07:00
6aa2de7b8d remove actix-web artifacts 2018-10-04 17:00:27 -07:00
13b0ee7355 stopping point 2018-10-04 16:22:00 -07:00
1e1a4f846e use actix-net cell features 2018-10-02 22:23:51 -07:00
49eea3bf76 travis config 2018-10-02 20:22:51 -07:00
b0677aa029 fix stable compatibility 2018-10-02 19:42:24 -07:00
401ea574c0 make AcceptorTimeout::new public 2018-10-02 19:31:30 -07:00
bbcd618304 export AcceptorTimeout 2018-10-02 19:12:08 -07:00
1f68ce8541 fix tests 2018-10-02 19:05:58 -07:00
2710f70e39 add H1 transport 2018-10-02 17:30:29 -07:00
ae5c4dfb78 refactor http channels list; rename WorkerSettings 2018-10-02 15:25:32 -07:00
d7379bd10b update server ssl tests; upgrade rustls 2018-10-02 13:41:33 -07:00
b59712c439 add ssl handshake timeout tests 2018-10-02 11:32:43 -07:00
724668910b fix ssl handshake timeout 2018-10-02 11:18:59 -07:00
61c7534e03 fix stream flushing 2018-10-02 10:43:23 -07:00
f8b176de9e Fix no_http2 flag in HttpServer (#526) 2018-10-02 20:09:31 +03:00
c8505bb53f content-length bug fix (#525)
* content-length bug fix

* changes.md is updated

* typo
2018-10-02 09:15:48 -07:00
eed377e773 unneeded dep 2018-10-02 00:20:27 -07:00
f3ce6574e4 fix client timer and add slow request tests 2018-10-02 00:19:28 -07:00
f007860a16 cleanup warnings 2018-10-01 22:48:11 -07:00
fdfadb52e1 fix doc test for State 2018-10-01 22:29:30 -07:00
368f73513a set tcp-keepalive for test as well 2018-10-01 22:25:53 -07:00
c674ea9126 add StreamConfiguration service 2018-10-01 22:23:02 -07:00
7c78797d9b proper stop for test_ws_stopped test 2018-10-01 21:30:00 -07:00
84edc57fd9 increase sleep time 2018-10-01 21:19:27 -07:00
127af92541 clippy warnings 2018-10-01 21:16:56 -07:00
e4686f6c8d set socket linger to 0 on timeout 2018-10-01 20:53:22 -07:00
1bac65de4c add websocket stopped test 2018-10-01 20:15:26 -07:00
16945a554a add client shutdown timeout 2018-10-01 20:04:16 -07:00
91af3ca148 simplify h1 dispatcher 2018-10-01 19:18:24 -07:00
2217a152cb expose app error by http service 2018-10-01 15:19:49 -07:00
c1e0b4f322 expose internal http server types and allow to create custom http pipelines 2018-10-01 14:43:06 -07:00
5966ee6192 add HttpServer::register() function, allows to register services in actix net server 2018-09-28 16:03:53 -07:00
4aac3d6a92 refactor keep-alive timer 2018-09-28 15:04:59 -07:00
e95babf8d3 log acceptor init errors 2018-09-28 12:37:20 -07:00
f2d42e5e77 refactor acceptor error handling 2018-09-28 11:50:47 -07:00
0f1c80ccc6 deprecate start_incoming 2018-09-28 08:45:49 -07:00
fc5088b55e fix tarpaulin args 2018-09-28 00:08:23 -07:00
bec37fdbd5 update travis config 2018-09-27 22:23:29 -07:00
4b59ae2476 fix ssl config for client connector 2018-09-27 22:15:38 -07:00
d0fc9d7b99 simplify listen_ and bind_ methods 2018-09-27 21:55:44 -07:00
1ff86e5ac4 restore rust-tls support 2018-09-27 21:24:21 -07:00
ecfda64f6d add native-tls support 2018-09-27 20:40:34 -07:00
0bca21ec6d fix ssl tests 2018-09-27 19:57:40 -07:00
3173c9fa83 disable client timeout for tcp stream acceptor 2018-09-27 19:34:07 -07:00
85445ea809 rename and simplify ServiceFactory trait 2018-09-27 18:33:29 -07:00
d57579d700 refactor acceptor pipeline; add client timeout 2018-09-27 18:33:29 -07:00
b6a1cfa6ad update openssl support 2018-09-27 18:33:29 -07:00
9f1417af30 refactor http service builder 2018-09-27 18:33:29 -07:00
0aa0f326f7 fix changes from master 2018-09-27 18:33:29 -07:00
dbb4fab4f7 separate mod for HttpHandler; add HttpHandler impl for Vec<H> 2018-09-27 18:33:29 -07:00
6f3e70a92a simplify application factory 2018-09-27 18:33:29 -07:00
a63d3f9a7a cleanup ServerFactory trait 2018-09-27 18:33:29 -07:00
a3cfc24232 refactor acceptor service 2018-09-27 18:33:29 -07:00
6a61138bf8 enable ssl feature 2018-09-27 18:33:29 -07:00
7cf9af9b55 disable ssl for travis 2018-09-27 18:33:29 -07:00
c9a52e3197 refactor date generation 2018-09-27 18:33:29 -07:00
1907102685 switch to actix-net server 2018-09-27 18:33:29 -07:00
52195bbf16 update version 2018-09-27 18:17:58 -07:00
59deb4b40d Try to separate HTTP/1 read & write disconnect handling, to fix #511. (#514) 2018-09-27 18:15:02 -07:00
782eeb5ded Reduced unsafe coverage (#520) 2018-09-26 11:56:34 +03:00
1b298142e3 Correct composing of multiple origins in cors (#518) 2018-09-21 08:45:22 +03:00
0dc96658f2 Send response to inform client of error (#515) 2018-09-21 07:24:10 +03:00
f40153fca4 fix node::insert() method, missing next element 2018-09-17 11:39:03 -07:00
764103566d update changes 2018-09-17 10:48:37 -07:00
bfb2f2e9e1 fix node.remove(), update next node pointer 2018-09-17 10:25:45 -07:00
599e6b3385 refactor channel node remove operation 2018-09-17 05:29:07 -07:00
03e318f446 update changes 2018-09-15 17:10:53 -07:00
7449884ce3 fix wrong error message for path deserialize for i32 #510 2018-09-15 17:09:07 -07:00
bbe69e5b8d update version 2018-09-15 10:00:54 -07:00
9d1eefc38f use 5 seconds keep-alive timer by default 2018-09-15 09:57:54 -07:00
d65c72b44d use server keep-alive timer as slow request timer 2018-09-15 09:55:38 -07:00
c3f8b5cf22 clippy warnings 2018-09-11 11:25:32 -07:00
70a3f317d3 fix failing requests to test server #508 2018-09-11 11:24:05 -07:00
513c8ec1ce Merge pull request #505 from Neopallium/master
Fix issue with HttpChannel linked list.
2018-09-11 11:18:33 -07:00
04608b2ea6 Update changes. 2018-09-12 00:27:15 +08:00
70b45659e2 Make Node's traverse method take a closure instead of calling shutdown on each HttpChannel. 2018-09-12 00:27:15 +08:00
e0ae6b10cd Fix bug with HttpChannel linked list. 2018-09-12 00:27:15 +08:00
003b05b095 Don't ignore errors in std::fmt::Debug implementations (#506) 2018-09-11 14:57:55 +03:00
cdb57b840e prepare release 2018-09-07 20:47:54 -07:00
002bb24b26 unhide SessionBackend and SessionImpl traits and cleanup warnings 2018-09-07 20:46:43 -07:00
51982b3fec Merge pull request #503 from uzytkownik/route-regex
Refactor resource route parsing to allow repetition in the regexes
2018-09-07 20:19:31 -07:00
4251b0bc10 Refactor resource route parsing to allow repetition in the regexes 2018-09-06 08:51:55 +02:00
42f3773bec update changes 2018-09-05 09:03:58 -07:00
86fdbb47a5 Fix system_exit in HttpServer (#501) 2018-09-05 10:41:23 +02:00
4ca9fd2ad1 remove debug print 2018-09-03 22:09:12 -07:00
f0f67072ae Read client response until eof if connection header set to close #464 2018-09-03 21:35:59 -07:00
24d1228943 simplify handler path processing 2018-09-03 11:28:47 -07:00
b7a73e0a4f fix Scope::handler doc test 2018-09-02 08:51:26 -07:00
968c81e267 Handling scoped paths without leading slashes #460 2018-09-02 08:14:54 -07:00
d5957a8466 Merge branch 'master' of https://github.com/actix/actix-web 2018-09-02 07:47:45 -07:00
f2f05e7715 allow to register handlers on scope level #465 2018-09-02 07:47:19 -07:00
3439f55288 doc: Add example for using custom nativetls connector (#497) 2018-09-01 18:13:52 +03:00
0425e2776f Fix Issue #490 (#498)
* Add failing testcase for HTTP 404 response with no reason text.

* Include canonical reason test for HTTP error responses.

* Don't send a reason for unknown status codes.
2018-09-01 12:00:32 +03:00
6464f96f8b Merge branch 'master' of https://github.com/actix/actix-web 2018-08-31 18:56:53 -07:00
a2b170fec9 fmt 2018-08-31 18:56:21 -07:00
0b42cae082 update tests 2018-08-31 18:54:19 -07:00
c313c003a4 Fix typo 2018-08-31 17:45:29 -07:00
3fa23f5e10 update version 2018-08-31 17:25:15 -07:00
2d51831899 handle socket read disconnect 2018-08-31 17:24:13 -07:00
e59abfd716 Merge pull request #496 from Neopallium/master
Fix issue with 'Connection: close' in ClientRequest
2018-08-31 17:17:39 -07:00
66881d7dd1 If buffer is empty, read more data before calling parser. 2018-09-01 02:25:05 +08:00
a42a8a2321 Add some comments to clarify logic. 2018-09-01 02:15:36 +08:00
2341656173 Simplify buffer reading logic. Remove duplicate code. 2018-09-01 01:41:38 +08:00
487519acec Add client test for 'Connection: close' as reported in issue #495 2018-09-01 00:34:19 +08:00
af6caa92c8 Merge branch 'master' into master 2018-09-01 00:17:34 +08:00
3ccbce6bc8 Fix issue with 'Connection: close' in ClientRequest 2018-09-01 00:08:53 +08:00
797b52ecbf Update CHANGES.md 2018-08-29 20:58:23 +02:00
4bab50c861 Add ability to pass a custom TlsConnector (#491) 2018-08-29 20:53:31 +02:00
5906971b6d Merge pull request #483 from Neopallium/master
Fix bug with client disconnect immediately after receiving http request.
2018-08-26 10:15:25 -07:00
8393d09a0f Fix tests. 2018-08-27 00:31:31 +08:00
c3ae9997fc Fix bug with http1 client disconnects. 2018-08-26 22:21:05 +08:00
d39dcc58cd Merge pull request #482 from 0x1793d1/master
Fix server startup log message
2018-08-24 20:53:45 -07:00
471a3e9806 Fix server startup log message 2018-08-24 23:21:32 +02:00
48ef18ffa9 update changes 2018-08-23 12:54:59 -07:00
9ef7a9c182 hide AcceptorService 2018-08-23 11:30:49 -07:00
3dafe6c251 hide token and server flags 2018-08-23 11:30:07 -07:00
8dfc34e785 fix tokio-tls IoStream impl 2018-08-23 10:27:32 -07:00
810995ade0 fix tokio-tls dependency #480 2018-08-23 10:10:13 -07:00
1716380f08 clippy fmt 2018-08-23 09:48:01 -07:00
e9c139bdea clippy warnings 2018-08-23 09:47:32 -07:00
cf54be2f17 hide new server api 2018-08-23 09:39:11 -07:00
f39b520a2d Merge pull request #478 from fzgregor/master
Made extensions constructor public
2018-08-23 09:34:47 -07:00
89f414477c Merge branch 'master' into master 2018-08-23 09:34:34 -07:00
986f19af86 Revert back to serde_urlencoded dependency (#479) 2018-08-21 22:23:17 +03:00
e680541e10 Made extensions constructor public 2018-08-18 19:32:28 +02:00
56bc900a82 Set minimum rustls version that fixes corruption (#474) 2018-08-17 19:53:16 +03:00
bdc9a8bb07 Optionally support tokio-uds's UnixStream as IoStream (#472) 2018-08-17 19:04:15 +03:00
8fe30a5b66 Merge pull request #473 from kornelski/usetest
Fix tests on Unix
2018-08-17 07:20:47 -07:00
a8405d0686 Fix tests on Unix 2018-08-17 13:13:48 +01:00
eb1e9a785f allow to use fn with multiple arguments with .with()/.with_async() 2018-08-16 20:29:06 -07:00
248bd388ca Improve HTTP server docs (#470) 2018-08-16 16:11:15 +03:00
9f5641c85b Add mention of reworked Content-Disposition 2018-08-13 17:37:00 +03:00
d9c7cd96a6 Rework Content-Disposition parsing totally (#461) 2018-08-13 17:34:05 +03:00
bf7779a9a3 add TestRequest::run_async_result helper method 2018-08-09 18:58:14 -07:00
cc3fbd27e0 better ergonomics 2018-08-09 17:25:23 -07:00
26629aafa5 explicit use 2018-08-09 13:41:13 -07:00
2ab7dbadce better ergonomics for Server::service() method 2018-08-09 13:38:10 -07:00
2e8d67e2ae upgrade native-tls package 2018-08-09 13:08:59 -07:00
43b6828ab5 Merge branch 'master' of https://github.com/actix/actix-web 2018-08-09 11:52:45 -07:00
e4ce6dfbdf refactor workers management 2018-08-09 11:52:32 -07:00
6b9fa2c3d9 Merge pull request #458 from davidMcneil/master
Add json2 HttpResponseBuilder method
2018-08-09 02:10:14 -07:00
5713d93158 Merge branch 'master' into master 2018-08-09 08:13:22 +03:00
cfe4829a56 add TestRequest::execute() helper method 2018-08-08 16:13:45 -07:00
b69774db61 fix attr name 2018-08-08 14:23:16 -07:00
542782f28a add HttpRequest::drop_state() 2018-08-08 13:57:13 -07:00
7c8dc4c201 Add json2 tests 2018-08-08 12:17:19 -06:00
7a11c2eac1 Add json2 HttpResponseBuilder method 2018-08-08 11:11:15 -06:00
8eb9eb4247 flush io on complete 2018-08-08 09:12:32 -07:00
992f7a11b3 remove debug println 2018-08-07 22:40:09 -07:00
30769e3072 fix http/2 error handling 2018-08-07 20:48:25 -07:00
57f991280c fix protocol order for rustls acceptor 2018-08-07 13:53:24 -07:00
85acc3f8df deprecate HttpServer::no_http2(), update changes 2018-08-07 12:49:40 -07:00
5bd82d4f03 update changes 2018-08-07 12:00:51 -07:00
58a079bd10 include content-length to error response 2018-08-07 11:56:39 -07:00
16546a707f Merge pull request #453 from DoumanAsh/reserve_status_line_for_server_error
Reserve enough space for ServerError task to write status line
2018-08-07 11:48:55 -07:00
86a5afb5ca Reserve enough space for ServerError task to write status line 2018-08-07 17:34:24 +03:00
9c80d3aa77 Write non-80 port in HOST of client's request (#451) 2018-08-07 10:01:29 +03:00
954f1a0b0f impl FromRequest for () (#449) 2018-08-06 10:44:08 +03:00
f4fba5f481 Merge pull request #447 from DoumanAsh/multiple_set_cookies
Correct setting cookies in HTTP2 writer
2018-08-04 08:58:12 -07:00
995f819eae Merge branch 'master' into multiple_set_cookies 2018-08-04 08:58:00 -07:00
85e7548088 fix adding multiple response headers for http/2 #446 2018-08-04 08:56:33 -07:00
900fd5a98e Correct settings headers for HTTP2
Add test to verify number of Set-Cookies
2018-08-04 18:05:41 +03:00
84b27db218 fix no_http2 flag 2018-08-03 19:40:43 -07:00
ac9180ac46 simplify channel impl 2018-08-03 19:32:46 -07:00
e34b5c08ba allow to pass extra information from acceptor to application level 2018-08-03 19:24:53 -07:00
f3f1e04853 refactor ssl support 2018-08-03 16:09:46 -07:00
036cf5e867 update changes 2018-08-03 08:20:59 -07:00
e61ef7dee4 Use zlib instead of deflate for content encoding (#442) 2018-08-03 14:56:26 +02:00
9a10d8aa7a Fixed headers' formatting for CORS Middleware Access-Control-Expose-Headers header value to HTTP/1.1 & HTTP/2 spec-compliant format (#436) 2018-08-03 15:03:11 +03:00
f8e5d7c6c1 Fixed broken build on wrong variable usage (#440) 2018-08-03 14:11:51 +03:00
8c89c90c50 add accept backpressure #250 2018-08-02 23:17:10 -07:00
e9c1889df4 test timing 2018-08-01 16:41:24 -07:00
0da3fdcb09 do not use Arc for rustls config 2018-08-01 10:59:00 -07:00
a5f80a25ff update changes 2018-08-01 10:51:47 -07:00
6d9a1cadad Merge pull request #433 from jrconlin/feat/432
feature: allow TestServer to open a websocket on any URL
2018-08-01 10:45:55 -07:00
97ada3d3d0 Merge branch 'feat/432' of github.com:jrconlin/actix-web into feat/432 2018-08-01 10:27:48 -07:00
115f59dd14 Merge branch 'master' of https://github.com/actix/actix-web into feat/432 2018-08-01 09:59:36 -07:00
972b008a6e remove unsafe error transmute, upgrade failure to 0.1.2 #434 2018-08-01 09:42:12 -07:00
246eafb8d2 Merge branch 'master' of https://github.com/actix/actix-web into feat/432 2018-08-01 09:36:08 -07:00
dca4c110dd feature: allow TestServer to open a websocket on any URL
* added `TestServer::ws_at(uri_str)`
* modified `TestServer::ws()` to call `self.ws_at("/")` to preserve
behavior

Closes #432
2018-08-01 09:30:27 -07:00
58230b15b9 use one thread for accept loop; refactor rust-tls support 2018-07-31 19:51:26 -07:00
aa1e75f071 feature: allow TestServer to open a websocket on any URL
* added `TestServer::ws_at(uri_str)`
* modified `TestServer::ws()` to call `self.ws_at("/")` to preserve
behavior

Closes #432
2018-07-31 16:21:18 -07:00
2071ea0532 HttpRequest::url_for is not working with scopes #429 2018-07-31 15:40:52 -07:00
3bd43090fb use new gzdecoder, fixes gz streaming #228 2018-07-31 09:06:05 -07:00
4dba531bf9 do not override HOST header for client request #428 2018-07-31 08:51:24 -07:00
2072c933ba handle error during request creation 2018-07-30 15:04:52 -07:00
7bc0ace52d move server accept impl to separate module 2018-07-30 13:42:42 -07:00
4c4d0d2745 update changes 2018-07-30 10:23:28 -07:00
28a855214b Merge pull request #427 from jeizsm/feature/rustls
add rustls
2018-07-30 10:21:37 -07:00
196da6d570 add rustls 2018-07-30 08:21:12 +03:00
b4ed564e5d update changes 2018-07-26 09:11:50 -07:00
80fbc2e9ec Fix stream draining for http/2 connections #290 2018-07-25 15:38:02 -07:00
f58065082e fix missing content-encoding header for h2 connections #421 2018-07-25 10:30:55 -07:00
6048817ba7 Correct flate feature names in documentation 2018-07-25 20:22:18 +03:00
e408b68744 Update cookie dependency (#422) 2018-07-25 18:01:22 +03:00
b878613e10 fix warning 2018-07-24 15:49:46 -07:00
85b275bb2b fix warnings 2018-07-24 15:09:30 -07:00
d6abd2fe22 allow to handle empty path for application with prefix 2018-07-24 14:51:48 -07:00
b79a9aaec7 fix changelog 2018-07-24 14:18:04 -07:00
b9586b3f71 Merge pull request #412 from gdamjan/master
remove the timestamp from the default logger middleware
2018-07-24 14:07:10 -07:00
d3b12d885e Merge branch 'master' into master 2018-07-24 14:07:03 -07:00
f21386708a Merge pull request #416 from axos88/master
Add FromRequest<S> implementation for Option<T> and Result<T> where T: FromRequest<S>
2018-07-24 14:06:08 -07:00
b48a2d4d7b add changes to CHANGES.md 2018-07-24 22:25:48 +02:00
35b754a3ab pr fixes 2018-07-24 09:42:46 +02:00
1079c5c562 Add FromRequest<S> implementation for Result<T> and Option<T> where T:FromRequest<S> 2018-07-24 09:42:46 +02:00
f4bb7efa89 add partialeq, eq, partialord and ord derive to Path, Form and Query 2018-07-24 09:42:46 +02:00
0099091e96 remove unnecessary use 2018-07-24 09:42:46 +02:00
c352a69d54 fix dead links 2018-07-23 13:22:16 -07:00
f5347ec897 Merge pull request #415 from DenisKolodin/cookie-http-only
Add http_only flag to CookieSessionBackend
2018-07-23 02:54:23 -07:00
b367f07d56 Add http_only flag to CookieSessionBackend 2018-07-23 12:49:59 +03:00
6a75a3d683 document the change in the default logger 2018-07-21 16:01:42 +02:00
56b924e155 remove the timestamp from the default logger middleware
env_logger and other logging systems will (or should) already add their
own timestamp.
2018-07-21 15:15:28 +02:00
4862227df9 fix not implemented panic #410 2018-07-21 05:58:08 -07:00
f6499d9ba5 publish stable docs on actix.rs site 2018-07-21 04:19:02 -07:00
7138bb2f29 update migration 2018-07-21 01:00:50 -07:00
8cb510293d update changes 2018-07-20 14:10:41 -07:00
040d9d2755 Merge branch 'master' of github.com:actix/actix-web 2018-07-20 12:43:44 -07:00
2043bb5ece do not reallocate waiters 2018-07-20 10:20:41 -07:00
a751df2589 Initial config for static files (#405) 2018-07-20 07:49:25 +03:00
f6e35a04f0 Just a bit of sanity check for short paths (#409) 2018-07-20 07:48:57 +03:00
0925a7691a ws/context: Increase write() visibility to public (#402)
This type is introduced to avoid confusion between the `.binary()` and `.write_raw()` methods on WebSocket contexts
2018-07-19 20:04:13 +03:00
2988a84e5f Expose leaked private ContentDisposition (#406) 2018-07-19 20:03:45 +03:00
6b10e1eff6 rename PayloadHelper 2018-07-18 10:01:28 +06:00
85672d1379 fix client connector wait queue 2018-07-18 01:23:56 +06:00
373f2e5028 add release stat 2018-07-17 17:38:16 +06:00
f9f259e718 Merge branch 'master' of github.com:actix/actix-web 2018-07-17 17:23:23 +06:00
d43902ee7c proper handling for client connection release 2018-07-17 17:23:03 +06:00
a7ca5fa5d8 Add few missing entries to changelog 2018-07-17 11:10:04 +03:00
29a275b0f5 Session should write percent encoded cookies and add cookie middleware test (#393)
* Should write percent encoded cookies to HTTP response

* Add cookie middleware test
2018-07-17 08:38:18 +03:00
1af5aa3a3e calculate client request timeout 2018-07-17 02:30:21 +06:00
bccd7c7671 add wait queue size stat to client connector 2018-07-17 01:57:57 +06:00
2a8c2fb55e export Payload 2018-07-16 12:14:24 +06:00
2dd57a48d6 checks nested scopes in has_resource() 2018-07-16 11:33:29 +06:00
22385505a3 clippy warnings and fmt 2018-07-16 11:17:45 +06:00
5888f01317 use has_prefixed_route for NormalizePath helper 2018-07-16 11:13:41 +06:00
b7a3fce17b simplify has_prefixed_route() 2018-07-16 11:10:51 +06:00
bce05e4fcb Merge pull request #381 from OtaK/fix/has_route_prefixes
Add prefix aware RouteInfo::has_prefixed_route()
2018-07-16 10:58:50 +06:00
3373847a14 allocate buffer for request payload extractors 2018-07-16 00:40:22 +06:00
8f64508887 Added RouteInfo::has_prefixed_route() method for route matching with prefix awareness 2018-07-15 19:37:20 +02:00
30c84786b7 Merge pull request #399 from actix/router-refactor
Router refactoring
2018-07-15 19:16:07 +06:00
2e5f627050 do not force install tarpaulin 2018-07-15 19:15:36 +06:00
2214492792 use assert and restore test case 2018-07-15 18:53:02 +06:00
c43b6e3577 cargo tarpaulin 2018-07-15 16:39:15 +06:00
42d3e86941 calculate prefix dynamically 2018-07-15 16:25:56 +06:00
b759dddf5a simplify application prefix impl 2018-07-15 16:25:56 +06:00
9570c1cccd rename RouteInfo 2018-07-15 16:25:56 +06:00
da915972c0 refactor router 2018-07-15 16:25:56 +06:00
cf976d296f Merge pull request #397 from actix/Turbo87-patch-1
error: Fix documentation typo
2018-07-14 09:38:43 +06:00
9012cf43fe error: Fix documentation typo 2018-07-14 00:05:07 +02:00
7d753eeb8c Private serde fork (#390)
* Fork serde_urlencoded

* Apply enum PR https://github.com/nox/serde_urlencoded/pull/30

* Add test to verify enum in query

* Docs are updated to show example of how to use enum.
2018-07-13 09:59:09 +03:00
4395add1c7 update travis config 2018-07-13 00:05:01 +06:00
35911b832a Merge branch 'master' of github.com:actix/actix-web 2018-07-12 23:59:10 +06:00
b8b90d9ec9 rename ResourceHandler to Resource 2018-07-12 15:30:01 +06:00
422a870cd7 Merge pull request #387 from actix/fix-missing-content-length
fix missing content length
2018-07-12 16:18:55 +10:00
db005af1af clippy warnings 2018-07-12 10:41:49 +06:00
8e462c5944 use write instead format 2018-07-12 10:35:09 +06:00
86e44de787 pin failure crate 2018-07-12 10:29:37 +06:00
d9988f3ab6 fix missing content length
fix missing content length when no compression is used
2018-07-11 21:21:32 +10:00
696152f763 Merge pull request #377 from Diggsey/apply-mask
Refactor `apply_mask` implementation, removing dead code paths and re…
2018-07-11 13:36:08 +06:00
f38a370b94 update changes 2018-07-11 13:34:40 +06:00
28b36c650a fix h2 compatibility 2018-07-11 13:25:07 +06:00
b22132d3d6 Merge branch 'master' into apply-mask 2018-07-11 13:15:35 +06:00
19ae5e9489 Merge branch 'master' of github.com:actix/actix-web 2018-07-11 12:56:53 +06:00
9aef34e768 remove & to &mut transmute #385 2018-07-11 12:56:35 +06:00
bed961fe35 Lessen numbers of jobs for AppVeyor 2018-07-11 09:23:17 +03:00
87824a9cf6 Refactor apply_mask implementation, removing dead code paths and reducing scope of unsafety 2018-07-08 13:56:43 +01:00
82920e1ac1 Do not override user settings on signals and stop handling (#375) 2018-07-08 09:01:44 +03:00
110605f50b stop actor context on error #311 2018-07-08 09:41:55 +06:00
00c97504b6 Merge pull request #368 from Diggsey/master
Remove reimplementation of `LazyCell`
2018-07-07 09:46:44 +06:00
85012f947a Remove reimplementation of LazyCell 2018-07-06 22:28:08 +01:00
62ba01fc15 update changes 2018-07-06 15:00:14 +06:00
5b7aed101a remove unsafe 2018-07-06 13:54:43 +06:00
1c3b32169e remove stream from WebsocketsContext::with_factory 2018-07-06 12:11:40 +06:00
cfa470db50 close connection for head requests 2018-07-06 09:21:24 +06:00
a5f7a67b4d clippy warnings 2018-07-06 08:24:44 +06:00
185e710dc8 do not drop content-encoding header in case of identity #363 2018-07-06 08:24:36 +06:00
9070d59ea8 do not read head payload 2018-07-06 08:11:36 +06:00
2a25caf2c5 Merge branch 'master' of github.com:actix/actix-web 2018-07-06 07:49:50 +06:00
7d96b92aa3 add check for usize cast 2018-07-06 07:46:47 +06:00
67e4cad281 Introduce method to set header if it is missing only (#364)
Also let default headers use it.

Closes #320
2018-07-05 19:27:18 +03:00
080f232a0f Use StaticFile default handler when file is inaccessible (#357)
* Use Staticfile default handler on all error paths

* Return an error from StaticFiles::new() if directory doesn't exist
2018-07-05 12:34:13 +03:00
ac3a76cd32 update httparse version 2018-07-05 13:21:33 +06:00
8058d15624 clippy warnings 2018-07-05 13:16:16 +06:00
05a43a855e remove unsafe 2018-07-05 13:00:46 +06:00
80339147b9 call disconnect on write error 2018-07-05 12:50:54 +06:00
6af2f5d642 re-enable start_incoming support 2018-07-05 12:14:10 +06:00
d7762297da update actix dependency 2018-07-05 12:02:32 +06:00
d5606625a2 remove public Clone for Request 2018-07-04 22:57:40 +06:00
5d79114239 optimize Request handling 2018-07-04 22:52:49 +06:00
f559f23e1c Merge branch 'master' of github.com:actix/actix-web 2018-07-04 21:02:40 +06:00
6fd686ef98 cleanup warnings 2018-07-04 21:01:27 +06:00
4c5a63965e use new actix context api 2018-07-04 17:04:23 +06:00
09aabc7b3b plain/text -> text/plain in comment (#362) 2018-07-04 11:17:44 +03:00
b6d26c9faf Merge pull request #348 from actix/request-mutability
Request mutability
2018-07-02 23:52:42 +06:00
fec6047ddc refactor HttpRequest mutability 2018-07-02 23:35:32 +06:00
445ea043dd remove unsafes 2018-07-02 23:32:29 +06:00
0be5448597 Properly escape special characters in fs/directory_listing. (#355) 2018-06-30 15:01:48 +03:00
0f27389e72 set length of vector to max_bytes (closes #345) (#346) 2018-06-26 08:09:12 +03:00
a9425a866b Fix duplicate tail of StaticFiles with index_file
Map from 0.6 to master
2018-06-25 19:59:55 +03:00
800c404c72 explicit response release 2018-06-25 10:10:02 +06:00
32212bad1f simplify http response pool 2018-06-25 09:08:28 +06:00
d1b73e30e0 update comments 2018-06-24 22:27:30 +06:00
c0cdc39ba9 do not store cookies on client response 2018-06-24 22:21:04 +06:00
8e8a68f90b add empty output stream 2018-06-24 22:05:44 +06:00
989cd61236 handle empty te 2018-06-24 10:59:01 +06:00
33260c7b35 split encoding module 2018-06-24 10:42:20 +06:00
40ca9ba9c5 simplify write buffer 2018-06-24 10:30:58 +06:00
45682c04a8 refactor content encoder 2018-06-24 08:54:01 +06:00
348491b18c fix alpn connector 2018-06-23 17:59:45 +06:00
3d2226aa9e Merge branch 'master' of github.com:actix/actix-web 2018-06-23 12:40:45 +06:00
cf38183dcb refactor client connector waiters maintenance 2018-06-23 12:40:21 +06:00
e3dc6f0ca8 refactor h1decoder 2018-06-23 12:28:55 +06:00
a5369aed8b Changes a leaked box into an Rc<String> and makes resource() return an Option (#343) 2018-06-23 08:16:52 +02:00
ff0ab733e4 remove unsafe from mask 2018-06-23 11:51:02 +06:00
d1318a35a0 remove unnecessary unsafes 2018-06-23 10:29:23 +06:00
756227896b update set_date impl 2018-06-23 10:13:09 +06:00
4fadff63f4 Use Box::leak for dynamic param names 2018-06-23 09:57:03 +06:00
7bc7b4839b Switch from fnv to a identity hasher in extensions (#342) 2018-06-22 11:32:32 +02:00
dda6ee95df Changes the router to use atoms internally (#341) 2018-06-22 09:33:32 +02:00
765c38e7b9 remove libc dependency 2018-06-22 11:47:33 +06:00
6c44575923 transmute names once 2018-06-22 11:44:38 +06:00
fc7238baee refactor read_from_io 2018-06-22 11:30:40 +06:00
edd22bb279 refactor read_from_io 2018-06-22 09:01:20 +06:00
17c033030b Revert "remove unnecessary use of unsafe in read_from_io"
This reverts commit da237611cb.
2018-06-22 08:55:19 +06:00
3afdf3fa7e Merge pull request #335 from gnzlbg/fix_unsafe
remove unnecessary use of unsafe in read_from_io
2018-06-22 07:23:14 +06:00
50fbef88fc cleanup server pipeline 2018-06-21 23:51:25 +06:00
c9069e9a3c remove unneeded UnsafeCell 2018-06-21 23:21:28 +06:00
65ca563579 use read only self for Middleware 2018-06-21 23:06:23 +06:00
3de9284592 Handler::handle uses &self instead of mutable reference 2018-06-21 17:07:54 +06:00
5a9992736f Merge pull request #339 from joshleeb/propogate-scope-default-resource
Propagate scope default resource
2018-06-21 15:40:02 +06:00
0338767264 Update CHANGES for default scope propagation 2018-06-21 19:37:34 +10:00
c5e8c1b710 Propagate default resources to underlying scopes 2018-06-21 18:17:27 +10:00
b5594ae2a5 Fix doc api example 2018-06-21 14:11:00 +06:00
58d1f4a4aa switch to actix master 2018-06-21 13:34:36 +06:00
b7d813eeba update tests 2018-06-21 12:04:00 +06:00
8e160ebda7 clippy warning 2018-06-21 11:49:36 +06:00
0093b7ea5a refactor extractor configuration #331 2018-06-21 11:47:01 +06:00
75eec8bd4f fix condition 2018-06-21 11:23:21 +06:00
ebc59cf7b9 add unsafe checks #331 2018-06-21 11:20:21 +06:00
c2c4a5ba3f fix failure Send+Sync compatibility 2018-06-21 10:45:24 +06:00
dbd093075d Merge pull request #338 from tbroadley/fix-typos
Fix typos
2018-06-21 10:13:30 +06:00
1be27e17f8 convert timer error to io error 2018-06-21 10:05:20 +06:00
8b0fbb85d1 SendRequest execution fails with the entered unreachable code #329 2018-06-21 09:52:18 +06:00
cfe6725eb4 Allow to disable masking for websockets client 2018-06-21 09:49:33 +06:00
f815c1c096 Add test for default_resource scope propagation 2018-06-21 13:10:40 +10:00
280eae4335 Merge pull request #334 from Vurich/master
Fix some unsoundness
2018-06-21 07:15:33 +06:00
bd8cbfff35 docs: fix typos 2018-06-20 21:05:26 -04:00
da237611cb remove unnecessary use of unsafe in read_from_io 2018-06-20 13:14:53 +02:00
Jef
234c60d473 Fix some unsoundness
This improves the sound implementation of `fn route`.
Previously this function would iterate twice but we
can reduce the overhead without using `unsafe`.
2018-06-20 10:53:18 +02:00
2f917f3700 various cleanups and comments 2018-06-20 01:27:41 +06:00
311f0b23a9 cleanup more code 2018-06-20 00:36:32 +06:00
a69c1e3de5 remove unsafe from scope impl 2018-06-19 23:46:58 +06:00
c427fd1241 Merge pull request #328 from xfix/remove-some-uses-of-unsafe-from-frame-message
Remove some uses of unsafe from Frame::message
2018-06-19 21:52:41 +06:00
adcb4e1492 Merge pull request #327 from xfix/remove-use-of-unsafe-from-pipeline-poll
Remove use of unsafe from Pipeline#poll
2018-06-19 19:58:15 +06:00
3b1124c56c Merge branch 'master' into remove-some-uses-of-unsafe-from-frame-message 2018-06-19 19:20:40 +06:00
cafde76361 Merge branch 'master' into remove-use-of-unsafe-from-pipeline-poll 2018-06-19 19:20:25 +06:00
bfb93cae66 Update connector.rs 2018-06-19 19:19:31 +06:00
b5c1e42feb Merge branch 'master' into remove-use-of-unsafe-from-pipeline-poll 2018-06-19 18:30:37 +06:00
e884e7e84e Remove some uses of unsafe from Frame::message 2018-06-19 14:11:53 +02:00
877e177b60 Remove use of unsafe from Pipeline#poll 2018-06-19 13:42:44 +02:00
27b6af2800 refactor route matching 2018-06-19 16:45:26 +06:00
5c42b0902f better doc api examples 2018-06-19 12:07:07 +06:00
247e8727cb ClientBody is not needed 2018-06-19 10:15:16 +06:00
362b14c2f7 remove unsafe cell from ws client 2018-06-19 09:36:17 +06:00
261ad31b9a remove some unsafe code 2018-06-19 07:44:01 +06:00
68cd5bdf68 use actix 0.6 2018-06-18 09:18:03 +06:00
26f37ec2e3 refactor HttpHandlerTask trait 2018-06-18 05:45:54 +06:00
ef15646bd7 refactor default cpu pool 2018-06-18 04:56:18 +06:00
a5bbc455c0 cleanup mut transform 2018-06-18 04:41:41 +06:00
6ec8352612 method only for tests 2018-06-18 01:05:02 +06:00
f0f19c14d2 remove wsclient 2018-06-18 01:03:47 +06:00
daed502ee5 make mut api private 2018-06-18 01:03:07 +06:00
9d114d785e remove Clone from ExtractorConfig 2018-06-18 00:19:07 +06:00
ea118edf56 do not use references in ConnectionInfo 2018-06-18 00:01:41 +06:00
e1db47d550 refactor server settings 2018-06-17 23:51:20 +06:00
38fe8bebec fix doc string 2018-06-17 08:57:51 +06:00
c3f295182f use HashMap for HttpRequest::query() 2018-06-17 08:54:30 +06:00
b6ed778775 remove HttpMessage::range() 2018-06-17 08:48:50 +06:00
0f2aac1a27 remove unneed Send and Sync 2018-06-17 08:32:22 +06:00
70244c29e0 update doc api examples 2018-06-17 04:09:07 +06:00
a7a062fb68 clippy warnings 2018-06-17 03:26:34 +06:00
f3a73d7dde update changelog 2018-06-17 03:24:08 +06:00
879b2b5bde port Extensions from http crate #315 2018-06-17 03:22:08 +06:00
33050f55a3 remove Context::actor() method 2018-06-17 03:10:44 +06:00
e4443226f6 update actix usage 2018-06-17 02:58:56 +06:00
342a194605 fix handling ServerCommand #316 2018-06-16 22:56:27 +06:00
566b16c1f7 Merge branch 'master' of github.com:actix/actix-web 2018-06-14 11:42:27 +02:00
8261cf437d update actix api 2018-06-13 23:37:19 -07:00
8a8e6add08 Merge pull request #314 from DJMcNab/app-cleanup
remove duplication of `App::with_state` in `App::new`
2018-06-14 01:19:56 +03:00
b79307cab1 Merge branch 'master' into app-cleanup 2018-06-14 01:01:11 +03:00
4c646962a9 Merge pull request #312 from eddomuke/master
Add HttpMessage::readlines()
2018-06-14 00:40:29 +03:00
cb77f7e688 Add HttpMessage::readlines() 2018-06-14 00:19:48 +03:00
1bee528018 move ReadlinesError to error module 2018-06-13 22:59:36 +03:00
ad9aacf521 change poll method of Readlines 2018-06-13 22:41:35 +03:00
f8854f951c remove duplication of App::with_state in App::new 2018-06-13 20:31:20 +01:00
6d95e34552 add HttpMessage::readlines() 2018-06-13 20:45:31 +03:00
6c765739d0 add HttpMessage::readlines() 2018-06-13 20:43:03 +03:00
c8528e8920 Merge pull request #308 from eddomuke/master
Allow to override Form extractor error
2018-06-13 01:53:32 +03:00
0a080d9fb4 add test for form extractor 2018-06-13 01:33:28 +03:00
45b408526c Merge branch 'master' into master 2018-06-13 00:53:46 +03:00
1a91854270 Merge branch 'master' of github.com:actix/actix-web 2018-06-12 14:50:41 -07:00
99092fdf06 http/2 end-of-frame is not set if body is empty bytes #307 2018-06-12 14:50:21 -07:00
748ff389e4 Allow to override Form extractor error 2018-06-13 00:47:47 +03:00
b679b4cabc Merge pull request #306 from eddomuke/master
add ClientRequestBuilder::form()
2018-06-12 13:33:16 -07:00
ed7cbaa772 fix form_extractor test 2018-06-12 23:04:54 +03:00
e6bbda0efc add serialize 2018-06-12 22:42:15 +03:00
94283a73c2 make into_string, to_string 2018-06-12 22:31:33 +03:00
ffca416463 add test for ClientRequestBuilder::form() 2018-06-12 22:16:20 +03:00
9cc7651c22 add change to CHANGES.md 2018-06-12 20:32:16 +03:00
8af082d873 remove FormPayloadError 2018-06-12 20:26:09 +03:00
d4d3add17d add ClientRequestBuilder::form() 2018-06-12 19:30:00 +03:00
ce6f9e848b Merge pull request #305 from axon-q/response-cookies
Add HttpResponse methods to retrieve, add, and delete cookies
2018-06-12 14:39:06 +00:00
d8e1fd102d add cookie methods to HttpResponse 2018-06-12 13:56:53 +00:00
e414a52b51 content_disposition: remove unnecessary allocations 2018-06-12 13:48:23 +00:00
4d69e6d0b4 fs: minor cleanups to content_disposition 2018-06-12 13:47:49 +00:00
6f38d769a8 Merge pull request #304 from kazcw/master
fix url in example
2018-06-12 03:58:48 -07:00
48f77578ea fix url in example 2018-06-11 21:55:05 -07:00
9b012b3304 do not allow stream or actor responses for internal error #301 2018-06-11 19:45:17 -07:00
a0344eebeb InternalError can trigger memory unsafety #301 2018-06-11 18:54:36 -07:00
b9f6c313d4 Merge branch 'master' of github.com:actix/actix-web 2018-06-11 12:56:33 -07:00
ef420a8bdf fix docs.rs 2018-06-11 12:21:09 -07:00
0d54b6f38e Implement Responder for Option #294 (#297) 2018-06-11 14:05:41 +03:00
9afc3b6737 api docs link 2018-06-10 10:31:19 -07:00
ef88fc78d0 Merge branch 'master' of github.com:actix/actix-web 2018-06-10 10:25:05 -07:00
9dd66dfc22 better name for error 2018-06-10 10:24:34 -07:00
87a822e093 fix deprecated warnings 2018-06-10 10:14:13 -07:00
3788887c92 Merge pull request #293 from axon-q/static-file-updates
Better Content-Type and Content-Disposition handling for static files
2018-06-09 08:51:42 -07:00
785d0e24f0 Merge branch 'master' into static-file-updates 2018-06-09 08:21:34 -07:00
818d0bc187 new StreamHandler impl 2018-06-09 07:53:46 -07:00
aee24d4af0 minor syntax changes 2018-06-09 14:47:06 +00:00
fee203b402 update changelog 2018-06-09 14:02:05 +00:00
8681a346c6 fs: refactor Content-Type and Content-Disposition handling 2018-06-09 13:56:01 +00:00
1fdf6d13be content_disposition: add doc example 2018-06-09 13:38:21 +00:00
3751656722 expose fs::file_extension_to_mime() function 2018-06-09 11:20:06 +00:00
9151d61eda allow to use custom resolver for ClientConnector 2018-06-08 16:33:57 -07:00
4fe2f6b763 Merge pull request #284 from axon-q/multipart-content-disposition
multipart: parse and validate Content-Disposition
2018-06-07 21:20:18 -07:00
5a7902ff9a Merge branch 'master' into multipart-content-disposition 2018-06-07 21:20:11 -07:00
172b514fef Merge pull request #288 from memoryruins/patch-1
Update TechEmpower benchmarks to round 16
2018-06-07 21:09:49 -07:00
efb5d13280 readme: link to TechEmpower r16 benchmarks 2018-06-07 23:55:08 -04:00
f9f2ed04ab fix doc test 2018-06-07 20:22:23 -07:00
ce40ab307b update changes 2018-06-07 20:09:08 -07:00
f7ef8ae5a5 add Host predicate 2018-06-07 20:00:54 -07:00
60d40df545 fix clippy warning 2018-06-07 19:46:46 -07:00
f7bd6eeedc add application filters 2018-06-07 19:46:38 -07:00
a11f3c112f fix doc test 2018-06-07 21:18:51 +00:00
e9f59bc7d6 Merge branch 'master' into multipart-content-disposition 2018-06-07 11:02:53 -07:00
e970846167 update changelog 2018-06-07 17:59:35 +00:00
56e0dc06c1 defer parsing until user method call 2018-06-07 17:29:46 +00:00
789af0bbf2 Added improved failure interoperability with downcasting (#285)
Deprecates Error::cause and introduces failure interoperability functions and downcasting.
2018-06-07 18:53:27 +02:00
97b5410aad remove Option from ContentDisposition::from_raw() argument 2018-06-07 12:55:35 +00:00
a6e07c06b6 move CD parsing to Content-Type parsing location 2018-06-07 12:35:10 +00:00
31a301c9a6 fix multipart test 2018-06-07 11:38:35 +00:00
5a37a8b813 restore hyper tests 2018-06-07 10:55:36 +00:00
c0c1817b5c remove unicase dependency 2018-06-07 10:33:00 +00:00
82c888df22 fix test 2018-06-07 09:10:46 +00:00
936ba2a368 multipart: parse and validate Content-Disposition 2018-06-06 14:06:01 +00:00
2d0b609c68 travis config 2018-06-05 10:08:42 -07:00
6467d34a32 update release date 2018-06-05 09:45:07 -07:00
2b616808c7 metadata for docs.rs 2018-06-05 09:00:21 -07:00
e5f7e4e481 update changelog 2018-06-05 08:55:28 -07:00
d1da227ac5 fix multipart boundary parsing #282 2018-06-05 08:53:51 -07:00
960a8c425d update changelog 2018-06-05 07:40:11 -07:00
f94fd9ebee CORS: Do not validate Origin header on non-OPTION requests #271 2018-06-05 07:39:47 -07:00
67ee24f9a0 Merge pull request #274 from mockersf/user-agent
add default value for header User-Agent in requests
2018-06-04 14:04:52 -07:00
5004821cda Merge branch 'master' into user-agent 2018-06-04 14:04:45 -07:00
ae7a0e993d update changelog 2018-06-04 13:43:52 -07:00
984791187a Middleware::response is not invoked if error result was returned by another Middleware::start #255 2018-06-04 13:42:47 -07:00
b07c50860a update changelog 2018-06-04 22:34:07 +02:00
eb0909b3a8 Merge branch 'master' into user-agent 2018-06-04 10:20:53 -07:00
ca3fb11f8b add actix-web version in header 2018-06-04 08:15:04 +02:00
47eb4e3d3d Merge pull request #278 from mbrobbel/patch-2
Fix typo
2018-06-03 16:28:51 -07:00
268c5d9238 Fix typo 2018-06-03 20:28:08 +02:00
86be54df71 add default value for header User-Agent in requests 2018-06-03 15:48:00 +02:00
ea018e0ad6 better example in doc string 2018-06-02 16:03:23 -07:00
b799677532 better error messages for overflow errors 2018-06-02 15:10:48 -07:00
8c7182f6e6 Merge pull request #270 from DoumanAsh/payload_err
Specialize ResponseError for PayloadError
2018-06-02 15:06:55 -07:00
7298c7aabf Merge branch 'master' into payload_err 2018-06-02 15:04:22 -07:00
7e0706a942 implement Debug for Form, Query, Path extractors 2018-06-02 15:00:11 -07:00
698f0a1849 update changelog 2018-06-02 15:00:11 -07:00
8b8a3ac01d Support chunked encoding for UrlEncoded body #262 2018-06-02 15:00:06 -07:00
7ab23d082d fix doc test 2018-06-02 13:45:29 -07:00
913dce0a72 Merge branch 'master' into payload_err 2018-06-02 23:10:06 +03:00
2a9b57f489 Correct docstring 2018-06-02 22:27:43 +03:00
fce8dd275a Specialize ResponseError for PayloadError
Closes #257
2018-06-02 22:20:22 +03:00
3c472a2f66 remove debug prints 2018-06-02 11:57:49 -07:00
dcb561584d remove debug print 2018-06-02 11:55:50 -07:00
593a66324f update changelog 2018-06-02 11:45:37 -07:00
4a39216aa7 fixed HttpRequest::url_for for a named route with no variables #265 2018-06-02 11:44:09 -07:00
8d905c8504 add links to migration 2018-06-02 09:28:32 -07:00
33326ea41b fix layout 2018-06-02 09:25:11 -07:00
0457fe4d61 add System changes to migration guide 2018-06-02 09:19:13 -07:00
cede817915 update changelog 2018-06-02 09:15:44 -07:00
3bfed36fcc do not re-export actix_inner 2018-06-02 09:14:47 -07:00
0ff5f5f448 update migration 2018-06-02 09:01:51 -07:00
2f476021d8 Merge pull request #267 from joshleeb/trait-middleware-mut-self
Update Middleware Trait to Use `&mut self`
2018-06-02 08:54:30 -07:00
a61a1b0efe Merge branch 'master' into trait-middleware-mut-self 2018-06-02 08:54:00 -07:00
e041e9d3b7 Merge pull request #268 from killercup/docs/no-more-missing-docs
No more missing docs
2018-06-02 08:52:14 -07:00
890a7e70d6 Add missing API docs
These were written without much knowledge of the actix-web internals!
Please review carefully!
2018-06-02 15:52:50 +02:00
47b7be4fd3 Add warning for missing API docs 2018-06-02 15:50:45 +02:00
9c9eb62031 Update Middleware trait to use &mut self 2018-06-02 16:47:18 +10:00
8d73c30dae Merge pull request #266 from killercup/docs/fix-typos-and-run-more-code
Fix some ResourceHandler docs
2018-06-01 16:37:34 -07:00
d912bf8771 Add more docs to ResourceHandler API 2018-06-02 00:57:24 +02:00
f414a491dd Fix some ResourceHandler docs
Re-enables code blocks as doc tests to prevent them failing in the
future.
2018-06-02 00:57:07 +02:00
8f42fec9b2 stable compat 2018-06-01 12:17:13 -07:00
8452c7a044 fix doc api example 2018-06-01 11:22:40 -07:00
009ee4b3db update changelog 2018-06-01 10:55:54 -07:00
3e0a71101c drop with2 and with3 2018-06-01 10:54:23 -07:00
c8930b7b6b fix rustfmt formatting 2018-06-01 10:27:23 -07:00
3f5a39a5b7 cargo fmt 2018-06-01 09:37:14 -07:00
154cd3c5de better actix mod re-exports 2018-06-01 09:36:16 -07:00
80965d7a9a Re-export actix dependency. Closes #260 (#264)
- Re-export actix's prelude into actix namespace
- Removing implicit dependency on root's actix module
2018-05-31 20:43:14 +03:00
77becb9bc0 fix doc string 2018-05-29 18:48:39 -07:00
dde266b9ef fix doc string 2018-05-29 18:31:39 -07:00
34fd9f8148 travis config 2018-05-29 18:18:05 -07:00
a64205e502 refactor TransferEncoding; allow to use client api with threaded tokio runtime 2018-05-29 16:32:39 -07:00
844be8d9dd fix ssl test server 2018-05-29 10:59:24 -07:00
dffb7936fb Merge branch 'master' of github.com:actix/actix-web 2018-05-29 10:31:43 -07:00
ecd05662c0 use new actix system api 2018-05-29 10:31:37 -07:00
6eee3d1083 Merge pull request #258 from mbrobbel/patch-1
Fix typo in httpresponse.rs
2018-05-29 09:15:39 -07:00
6b43fc7068 Fix typo in httpresponse.rs 2018-05-29 18:11:10 +02:00
fb582a6bca fix connector 2018-05-27 05:18:37 -07:00
be2ceb7c66 update actix Addr; make ClientConnector thread safe 2018-05-27 05:02:49 -07:00
7c71171602 Merge pull request #248 from bbigras/same-site
Add same-site to CookieSessionBackend
2018-05-26 08:02:12 -07:00
4dcecd907b Add same-site to CookieSessionBackend
closes #247
2018-05-25 19:18:16 -04:00
255cd4917d fix doc test 2018-05-24 22:04:14 -07:00
f48702042b min rustc version 2018-05-24 21:09:20 -07:00
690169db89 migrate to tokio 2018-05-24 21:03:16 -07:00
565bcfb561 Merge pull request #245 from svartalf/response-builder-cookies-doc
Updating docs for HttpResponseBuilder::del_cookie
2018-05-24 12:42:08 -07:00
36f933ce1d Updating docs for HttpResponseBuilder::del_cookie 2018-05-24 21:53:35 +03:00
111b6835fa fix comment 2018-05-24 11:06:15 -07:00
bf63be3bcd bump version 2018-05-24 09:24:04 -07:00
271 changed files with 42676 additions and 32951 deletions

View File

@ -1,37 +1,15 @@
environment:
global:
PROJECT_NAME: actix
PROJECT_NAME: actix-web
matrix:
# Stable channel
- TARGET: i686-pc-windows-gnu
CHANNEL: 1.24.0
- TARGET: i686-pc-windows-msvc
CHANNEL: 1.24.0
- TARGET: x86_64-pc-windows-gnu
CHANNEL: 1.24.0
- TARGET: x86_64-pc-windows-msvc
CHANNEL: 1.24.0
# Stable channel
- TARGET: i686-pc-windows-gnu
CHANNEL: stable
- TARGET: i686-pc-windows-msvc
CHANNEL: stable
- TARGET: x86_64-pc-windows-gnu
CHANNEL: stable
- TARGET: x86_64-pc-windows-msvc
CHANNEL: stable
# Beta channel
- TARGET: i686-pc-windows-gnu
CHANNEL: beta
- TARGET: i686-pc-windows-msvc
CHANNEL: beta
- TARGET: x86_64-pc-windows-gnu
CHANNEL: beta
- TARGET: x86_64-pc-windows-msvc
CHANNEL: beta
# Nightly channel
- TARGET: i686-pc-windows-gnu
CHANNEL: nightly
- TARGET: i686-pc-windows-msvc
CHANNEL: nightly
- TARGET: x86_64-pc-windows-gnu
@ -59,4 +37,5 @@ build: false
# Equivalent to Travis' `script` phase
test_script:
- cargo clean
- cargo test --no-default-features --features="flate2-rust"

View File

@ -1,5 +1,5 @@
language: rust
sudo: false
sudo: required
dist: trusty
cache:
@ -8,12 +8,11 @@ cache:
matrix:
include:
- rust: 1.24.0
- rust: stable
- rust: beta
- rust: nightly
- rust: nightly-2019-04-02
allow_failures:
- rust: nightly
- rust: nightly-2019-04-02
env:
global:
@ -23,33 +22,35 @@ env:
before_install:
- sudo add-apt-repository -y ppa:0k53d-karl-f830m/openssl
- sudo apt-get update -qq
- sudo apt-get install -qq libssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev
- sudo apt-get install -y openssl libssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev
before_cache: |
if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-04-02" ]]; then
RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install cargo-tarpaulin
fi
# Add clippy
before_script:
- export PATH=$PATH:~/.cargo/bin
script:
- |
if [[ "$TRAVIS_RUST_VERSION" != "1.24.0" ]]; then
cargo clean
cargo test --features="alpn,tls" -- --nocapture
fi
- |
if [[ "$TRAVIS_RUST_VERSION" == "1.24.0" ]]; then
bash <(curl https://raw.githubusercontent.com/xd009642/tarpaulin/master/travis-install.sh)
USE_SKEPTIC=1 cargo tarpaulin --out Xml --no-count
bash <(curl -s https://codecov.io/bash)
echo "Uploaded code coverage"
fi
- cargo update
- cargo check --all --no-default-features
- cargo test --all-features --all -- --nocapture
# Upload docs
after_success:
- |
if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "stable" ]]; then
cargo doc --features "alpn, tls, session" --no-deps &&
cargo doc --no-deps --all-features &&
echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html &&
git clone https://github.com/davisp/ghp-import.git &&
./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc &&
echo "Uploaded documentation"
fi
- |
if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-04-02" ]]; then
taskset -c 0 cargo tarpaulin --out Xml --all --all-features
bash <(curl -s https://codecov.io/bash)
echo "Uploaded code coverage"
fi

View File

@ -1,401 +1,104 @@
# Changes
## [0.6.10] - 2018-05-24
## [1.0.0-beta.1] - 2019-04-xx
### Added
* Allow to use path without trailing slashes for scope registration #241
* Add helper functions for reading test response body,
`test::read_response()` and `test::read_response_json()`
* Allow to set encoding for exact NamedFile #239
* Add `.peer_addr()` #744
### Changed
* Rename `RouterConfig` to `ServiceConfig`
* Rename `test::call_success` to `test::call_service`
### Fixed
* `TestServer::post()` actually sends `GET` request #240
* Fixed `TestRequest::app_data()`
## 0.6.9 (2018-05-22)
## [1.0.0-alpha.6] - 2019-04-14
* Drop connection if request's payload is not fully consumed #236
### Changed
* Fix streaming response with body compression
* Allow to use any service as default service.
* Remove generic type for request payload, always use default.
## 0.6.8 (2018-05-20)
* Removed `Decompress` middleware. Bytes, String, Json, Form extractors
automatically decompress payload.
* Fix scope resource path extractor #234
* Make extractor config type explicit. Add `FromRequest::Config` associated type.
* Re-use tcp listener on pause/resume
## [1.0.0-alpha.5] - 2019-04-12
## 0.6.7 (2018-05-17)
### Added
* Fix compilation with --no-default-features
* Added async io `TestBuffer` for testing.
### Deleted
## 0.6.6 (2018-05-17)
* Removed native-tls support
* Panic during middleware execution #226
* Add support for listen_tls/listen_ssl #224
## [1.0.0-alpha.4] - 2019-04-08
* Implement extractor for `Session`
### Added
* Ranges header support for NamedFile #60
* `App::configure()` allow to offload app configuration to different methods
* Added `URLPath` option for logger
## 0.6.5 (2018-05-15)
* Added `ServiceRequest::app_data()`, returns `Data<T>`
* Fix error handling during request decoding #222
* Added `ServiceFromRequest::app_data()`, returns `Data<T>`
### Changed
## 0.6.4 (2018-05-11)
* `FromRequest` trait refactoring
* Fix segfault in ServerSettings::get_response_builder()
* Move multipart support to actix-multipart crate
### Fixed
## 0.6.3 (2018-05-10)
* Fix body propagation in Response::from_error. #760
* Add `Router::with_async()` method for async handler registration.
* Added error response functions for 501,502,503,504
## [1.0.0-alpha.3] - 2019-04-02
* Fix client request timeout handling
### Changed
* Renamed `TestRequest::to_service()` to `TestRequest::to_srv_request()`
## 0.6.2 (2018-05-09)
* Renamed `TestRequest::to_response()` to `TestRequest::to_srv_response()`
* WsWriter trait is optional.
* Removed `Deref` impls
### Removed
## 0.6.1 (2018-05-08)
* Removed unused `actix_web::web::md()`
* Fix http/2 payload streaming #215
* Fix connector's default `keep-alive` and `lifetime` settings #212
## [1.0.0-alpha.2] - 2019-03-29
* Send `ErrorNotFound` instead of `ErrorBadRequest` when path extractor fails #214
### Added
* Allow to exclude certain endpoints from logging #211
* rustls support
### Changed
## 0.6.0 (2018-05-08)
* use forked cookie
* Add route scopes #202
* multipart::Field renamed to MultipartField
* Allow to use ssl and non-ssl connections at the same time #206
## [1.0.0-alpha.1] - 2019-03-28
* Websocket CloseCode Empty/Status is ambiguous #193
### Changed
* Add Content-Disposition to NamedFile #204
* Complete architecture re-design.
* Allow to access Error's backtrace object
* Allow to override files listing renderer for `StaticFiles` #203
* Various extractor usability improvements #207
## 0.5.6 (2018-04-24)
* Make flate2 crate optional #200
## 0.5.5 (2018-04-24)
* Fix panic when Websocket is closed with no error code #191
* Allow to use rust backend for flate2 crate #199
## 0.5.4 (2018-04-19)
* Add identity service middleware
* Middleware response() is not invoked if there was an error in async handler #187
* Use Display formatting for InternalError Display implementation #188
## 0.5.3 (2018-04-18)
* Impossible to quote slashes in path parameters #182
## 0.5.2 (2018-04-16)
* Allow to configure StaticFiles's CpuPool, via static method or env variable
* Add support for custom handling of Json extractor errors #181
* Fix StaticFiles does not support percent encoded paths #177
* Fix Client Request with custom Body Stream halting on certain size requests #176
## 0.5.1 (2018-04-12)
* Client connector provides stats, `ClientConnector::stats()`
* Fix end-of-stream handling in parse_payload #173
* Fix StaticFiles generate a lot of threads #174
## 0.5.0 (2018-04-10)
* Type-safe path/query/form parameter handling, using serde #70
* HttpResponse builder's methods `.body()`, `.finish()`, `.json()`
return `HttpResponse` instead of `Result`
* Use more ergonomic `actix_web::Error` instead of `http::Error` for `ClientRequestBuilder::body()`
* Added `signed` and `private` `CookieSessionBackend`s
* Added `HttpRequest::resource()`, returns current matched resource
* Added `ErrorHandlers` middleware
* Fix router cannot parse Non-ASCII characters in URL #137
* Fix client connection pooling
* Fix long client urls #129
* Fix panic on invalid URL characters #130
* Fix logger request duration calculation #152
* Fix prefix and static file serving #168
## 0.4.10 (2018-03-20)
* Use `Error` instead of `InternalError` for `error::ErrorXXXX` methods
* Allow to set client request timeout
* Allow to set client websocket handshake timeout
* Refactor `TestServer` configuration
* Fix server websockets big payloads support
* Fix http/2 date header generation
## 0.4.9 (2018-03-16)
* Allow to disable http/2 support
* Wake payload reading task when data is available
* Fix server keep-alive handling
* Send Query Parameters in client requests #120
* Move brotli encoding to a feature
* Add option of default handler for `StaticFiles` handler #57
* Add basic client connection pooling
## 0.4.8 (2018-03-12)
* Allow to set read buffer capacity for server request
* Handle WouldBlock error for socket accept call
## 0.4.7 (2018-03-11)
* Fix panic on unknown content encoding
* Fix connection get closed too early
* Fix streaming response handling for http/2
* Better sleep on error support
## 0.4.6 (2018-03-10)
* Fix client cookie handling
* Fix json content type detection
* Fix CORS middleware #117
* Optimize websockets stream support
## 0.4.5 (2018-03-07)
* Fix compression #103 and #104
* Fix client cookie handling #111
* Non-blocking processing of a `NamedFile`
* Enable compression support for `NamedFile`
* Better support for `NamedFile` type
* Add `ResponseError` impl for `SendRequestError`. This improves ergonomics of the client.
* Add native-tls support for client
* Allow client connection timeout to be set #108
* Allow to use std::net::TcpListener for HttpServer
* Handle panics in worker threads
## 0.4.4 (2018-03-04)
* Allow to use Arc<Vec<u8>> as response/request body
* Fix handling of requests with an encoded body with a length > 8192 #93
## 0.4.3 (2018-03-03)
* Fix request body read bug
* Fix segmentation fault #79
* Set reuse address before bind #90
## 0.4.2 (2018-03-02)
* Better naming for websockets implementation
* Add `Pattern::with_prefix()`, make it more usable outside of actix
* Add csrf middleware for filter for cross-site request forgery #89
* Fix disconnect on idle connections
## 0.4.1 (2018-03-01)
* Rename `Route::p()` to `Route::filter()`
* Better naming for http codes
* Fix payload parse in situation when socket data is not ready.
* Fix Session mutable borrow lifetime #87
## 0.4.0 (2018-02-28)
* Actix 0.5 compatibility
* Fix request json/urlencoded loaders
* Simplify HttpServer type definition
* Added HttpRequest::encoding() method
* Added HttpRequest::mime_type() method
* Added HttpRequest::uri_mut(), allows to modify request uri
* Added StaticFiles::index_file()
* Added http client
* Added websocket client
* Added TestServer::ws(), test websockets client
* Added TestServer http client support
* Allow to override content encoding on application level
## 0.3.3 (2018-01-25)
* Stop processing any events after context stop
* Re-enable write back-pressure for h1 connections
* Refactor HttpServer::start_ssl() method
* Upgrade openssl to 0.10
## 0.3.2 (2018-01-21)
* Fix HEAD requests handling
* Log request processing errors
* Always enable content encoding if encoding explicitly selected
* Allow multiple Applications on a single server with different state #49
* CORS middleware: allowed_headers is defaulting to None #50
## 0.3.1 (2018-01-13)
* Fix directory entry path #47
* Do not enable chunked encoding for HTTP/1.0
* Allow explicitly disable chunked encoding
## 0.3.0 (2018-01-12)
* HTTP/2 Support
* Refactor streaming responses
* Refactor error handling
* Asynchronous middlewares
* Refactor logger middleware
* Content compression/decompression (br, gzip, deflate)
* Server multi-threading
* Graceful shutdown support
## 0.2.1 (2017-11-03)
* Allow to start tls server with `HttpServer::serve_tls`
* Export `Frame` enum
* Add conversion impl from `HttpResponse` and `BinaryBody` to a `Frame`
## 0.2.0 (2017-10-30)
* Do not use `http::Uri` as it can not parse some valid paths
* Refactor response `Body`
* Refactor `RouteRecognizer` usability
* Refactor `HttpContext::write`
* Refactor `Payload` stream
* Re-use `BinaryBody` for `Frame::Payload`
* Stop http actor on `write_eof`
* Fix disconnection handling.
## 0.1.0 (2017-10-23)
* First release
* Return 405 response if no matching route found within resource #538

View File

@ -1,116 +1,124 @@
[package]
name = "actix-web"
version = "0.6.10"
version = "1.0.0-alpha.6"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust."
readme = "README.md"
keywords = ["http", "web", "framework", "async", "futures"]
keywords = ["actix", "http", "web", "framework", "async"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-web/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::http-client",
"web-programming::websocket"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
build = "build.rs"
edition = "2018"
[badges]
travis-ci = { repository = "actix/actix-web", branch = "master" }
appveyor = { repository = "fafhrd91/actix-web-hdy9d" }
codecov = { repository = "actix/actix-web", branch = "master", service = "github" }
[lib]
name = "actix_web"
path = "src/lib.rs"
[workspace]
members = [
".",
"awc",
"actix-http",
"actix-files",
"actix-framed",
"actix-session",
"actix-multipart",
"actix-web-actors",
"actix-web-codegen",
"test-server",
]
[package.metadata.docs.rs]
features = ["ssl", "brotli", "flate2-zlib", "secure-cookies", "client", "rust-tls"]
[features]
default = ["session", "brotli", "flate2-c"]
default = ["brotli", "flate2-zlib", "secure-cookies", "client"]
# tls
tls = ["native-tls", "tokio-tls"]
# openssl
alpn = ["openssl", "tokio-openssl"]
# sessions feature, session require "ring" crate and c compiler
session = ["cookie/secure"]
# http client
client = ["awc"]
# brotli encoding, requires c compiler
brotli = ["brotli2"]
brotli = ["actix-http/brotli"]
# miniz-sys backend for flate2 crate
flate2-c = ["flate2/miniz-sys"]
flate2-zlib = ["actix-http/flate2-zlib"]
# rust backend for flate2 crate
flate2-rust = ["flate2/rust_backend"]
flate2-rust = ["actix-http/flate2-rust"]
[dependencies]
actix = "^0.5.5"
base64 = "0.9"
bitflags = "1.0"
failure = "0.1.1"
h2 = "0.1"
http = "^0.1.5"
httparse = "1.2"
http-range = "0.1"
libc = "0.2"
log = "0.4"
mime = "0.3"
mime_guess = "2.0.0-alpha"
num_cpus = "1.0"
percent-encoding = "1.0"
rand = "0.4"
regex = "1.0"
serde = "1.0"
serde_json = "1.0"
serde_urlencoded = "0.5"
sha1 = "0.6"
smallvec = "0.6"
time = "0.1"
encoding = "0.2"
language-tags = "0.2"
lazy_static = "1.0"
url = { version="1.7", features=["query_encoding"] }
cookie = { version="0.10", features=["percent-encode"] }
brotli2 = { version="^0.3.2", optional = true }
flate2 = { version="1.0", optional = true, default-features = false }
# io
mio = "^0.6.13"
net2 = "0.2"
bytes = "0.4"
byteorder = "1"
futures = "0.1"
futures-cpupool = "0.1"
slab = "0.4"
tokio-io = "0.1"
tokio-core = "0.1"
# native-tls
native-tls = { version="0.1", optional = true }
tokio-tls = { version="0.1", optional = true }
# sessions feature, session require "ring" crate and c compiler
secure-cookies = ["actix-http/secure-cookies"]
# openssl
ssl = ["openssl", "actix-server/ssl", "awc/ssl"]
# rustls
rust-tls = ["rustls", "actix-server/rust-tls"]
[dependencies]
actix-codec = "0.1.2"
actix-service = "0.3.6"
actix-utils = "0.3.4"
actix-router = "0.1.2"
actix-rt = "0.2.2"
actix-web-codegen = "0.1.0-alpha.6"
actix-http = { version = "0.1.0", features=["fail"] }
actix-server = "0.4.3"
actix-server-config = "0.1.1"
actix-threadpool = "0.1.0"
awc = { version = "0.1.0", optional = true }
bytes = "0.4"
derive_more = "0.14"
encoding = "0.2"
futures = "0.1"
hashbrown = "0.2.2"
log = "0.4"
mime = "0.3"
net2 = "0.2.33"
parking_lot = "0.7"
regex = "1.0"
serde = { version = "1.0", features=["derive"] }
serde_json = "1.0"
serde_urlencoded = "0.5.3"
time = "0.1"
url = { version="1.7", features=["query_encoding"] }
# ssl support
openssl = { version="0.10", optional = true }
tokio-openssl = { version="0.2", optional = true }
rustls = { version = "^0.15", optional = true }
[dev-dependencies]
env_logger = "0.5"
actix-http = { version = "0.1.0", features=["ssl", "brotli", "flate2-zlib"] }
actix-http-test = { version = "0.1.0-alpha.3", features=["ssl"] }
actix-files = { version = "0.1.0-alpha.6" }
rand = "0.6"
env_logger = "0.6"
serde_derive = "1.0"
[build-dependencies]
version_check = "0.1"
tokio-timer = "0.2.8"
brotli2 = "0.3.2"
flate2 = "1.0.2"
[profile.release]
lto = true
opt-level = 3
codegen-units = 1
[workspace]
members = [
"./",
"tools/wsload/",
]
[patch.crates-io]
actix-web = { path = "." }
actix-http = { path = "actix-http" }
actix-http-test = { path = "test-server" }
actix-web-codegen = { path = "actix-web-codegen" }
actix-web-actors = { path = "actix-web-actors" }
actix-session = { path = "actix-session" }
actix-files = { path = "actix-files" }
awc = { path = "awc" }

View File

@ -1,4 +1,338 @@
## Migration from 0.5 to 0.6
## 1.0
* Resource registration. 1.0 version uses generalized resource
registration via `.service()` method.
instead of
```rust
App::new().resource("/welcome", |r| r.f(welcome))
```
use App's or Scope's `.service()` method. The `.service()` method accepts an
object that implements the `HttpServiceFactory` trait. By default
actix-web provides `Resource` and `Scope` services.
```rust
App::new().service(
    web::resource("/welcome")
        .route(web::get().to(welcome))
        .route(web::post().to(post_handler)),
);
```
* Scope registration.
instead of
```rust
let app = App::new().scope("/{project_id}", |scope| {
scope
.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path2", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path3", |r| r.f(|_| HttpResponse::MethodNotAllowed()))
});
```
use `.service()` for registration and `web::scope()` as scope object factory.
```rust
let app = App::new().service(
web::scope("/{project_id}")
.service(web::resource("/path1").to(|| HttpResponse::Ok()))
.service(web::resource("/path2").to(|| HttpResponse::Ok()))
.service(web::resource("/path3").to(|| HttpResponse::MethodNotAllowed()))
);
```
* `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
instead of
```rust
App::new().resource("/welcome", |r| r.with(welcome))
```
use `.to()` or `.to_async()` methods
```rust
App::new().service(web::resource("/welcome").to(welcome))
```
* Arguments are passed to handlers with extractors; multiple arguments are allowed
instead of
```rust
fn welcome((body, req): (Bytes, HttpRequest)) -> ... {
...
}
```
use multiple arguments
```rust
fn welcome(body: Bytes, req: HttpRequest) -> ... {
...
}
```
* `.f()`, `.a()` and `.h()` handler registration methods have been removed.
Use `.to()` for handlers and `.to_async()` for async handlers. Handler functions
must use extractors.
instead of
```rust
App::new().resource("/welcome", |r| r.f(welcome))
```
use the `.to()` or `.to_async()` methods
```rust
App::new().service(web::resource("/welcome").to(welcome))
```
* `State` is now `Data`. You register Data during the App initialization process
and then access it from handlers either using a Data extractor or using
HttpRequest's API.
instead of
```rust
App::with_state(T)
```
use App's `data` method
```rust
App::new()
    .data(T)
```
and either use the Data extractor within your handler
```rust
use actix_web::web::Data;
fn endpoint_handler(data: Data<T>) {
...
}
```
.. or access your Data element from the HttpRequest
```rust
fn endpoint_handler(req: HttpRequest) {
let data: Option<Data<T>> = req.app_data::<T>();
}
```
* AsyncResponder is removed.
instead of
```rust
use actix_web::AsyncResponder;
fn endpoint_handler(...) -> impl Future<Item=HttpResponse, Error=Error>{
...
.responder()
}
```
.. simply omit `AsyncResponder` and the corresponding `responder()` finish method.
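A minimal sketch of the new form, assuming the futures 0.1 API used by this release; the handler name and body are illustrative, and async handlers are registered with `.to_async()`:
```rust
use actix_web::{web, App, Error, HttpResponse};
use futures::{future, Future};

// Illustrative async handler: build and return the future directly,
// no `.responder()` call is needed.
fn endpoint_handler() -> impl Future<Item = HttpResponse, Error = Error> {
    future::ok(HttpResponse::Ok().body("done"))
}

fn main() {
    // Register the async handler with `.to_async()`.
    let app = App::new().route("/done", web::get().to_async(endpoint_handler));
}
```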
* Middleware
instead of
```rust
let app = App::new()
.middleware(middleware::Logger::default())
```
use `.wrap()` method
```rust
let app = App::new()
.wrap(middleware::Logger::default())
.route("/index.html", web::get().to(index));
```
* `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
methods have been removed. Use `Bytes`, `String`, `Form`, `Json`, `Multipart` extractors instead.
instead of
```rust
fn index(req: &HttpRequest) -> Responder {
req.body()
.and_then(|body| {
...
})
}
```
use
```rust
fn index(body: Bytes) -> Responder {
...
}
```
* `actix_web::server` module has been removed. To start an HTTP server, use the `actix_web::HttpServer` type.
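A minimal sketch of starting a server with the 1.0 API; the bind address and handler are placeholders:
```rust
use actix_web::{web, App, HttpResponse, HttpServer};

fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        // The application factory closure is called once per worker.
        App::new().route("/", web::get().to(|| HttpResponse::Ok().body("hello")))
    })
    .bind("127.0.0.1:8080")?
    .run()
}
```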
* StaticFiles and NamedFile have been moved to a separate crate.
instead of `use actix_web::fs::StaticFiles`
use `use actix_files::Files`
instead of `use actix_web::fs::NamedFile`
use `use actix_files::NamedFile`
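A minimal sketch assuming the actix-files 0.1 API; the `/static` mount point, directory name and file path are placeholders:
```rust
use actix_files::{Files, NamedFile};
use actix_web::{web, App, Result};

// Serve a single file through the NamedFile responder.
fn index() -> Result<NamedFile> {
    Ok(NamedFile::open("static/index.html")?)
}

fn main() {
    // Serve the whole "static" directory under /static and the index handler at /.
    let app = App::new()
        .route("/", web::get().to(index))
        .service(Files::new("/static", "static"));
}
```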
* Multipart has been moved to a separate crate.
instead of `use actix_web::multipart::Multipart`
use `use actix_multipart::Multipart`
* Response compression is not enabled by default.
To enable it, use the `Compress` middleware: `App::new().wrap(Compress::default())`.
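A short sketch with the middleware in place; the route and body are illustrative:
```rust
use actix_web::{middleware::Compress, web, App, HttpResponse};

fn main() {
    // Responses are compressed only when the client sends a matching Accept-Encoding.
    let app = App::new()
        .wrap(Compress::default())
        .route("/", web::get().to(|| HttpResponse::Ok().body("hello")));
}
```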
* Session middleware has moved to the `actix-session` crate
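A minimal sketch assuming the actix-session 0.1 API: the cookie backend is added with `.wrap()` and handlers receive a `Session` extractor; the signing key and the "visits" counter are illustrative:
```rust
use actix_session::{CookieSession, Session};
use actix_web::{web, App, Error, HttpResponse};

// Count visits in the session; the "visits" key is illustrative.
fn index(session: Session) -> Result<HttpResponse, Error> {
    let visits: i32 = session.get("visits")?.unwrap_or(0);
    session.set("visits", visits + 1)?;
    Ok(HttpResponse::Ok().body(format!("visits: {}", visits + 1)))
}

fn main() {
    let app = App::new()
        // 32-byte key shown for illustration; use a real secret in production.
        .wrap(CookieSession::signed(&[0; 32]).secure(false))
        .route("/", web::get().to(index));
}
```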
* Actor support has been moved to the `actix-web-actors` crate
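A minimal echo sketch assuming the actix-web-actors 1.0 `ws` API; the `Echo` actor and the `/ws/` route are illustrative:
```rust
use actix::{Actor, StreamHandler};
use actix_web::{web, App, Error, HttpRequest, HttpResponse};
use actix_web_actors::ws;

struct Echo;

impl Actor for Echo {
    type Context = ws::WebsocketContext<Self>;
}

impl StreamHandler<ws::Message, ws::ProtocolError> for Echo {
    fn handle(&mut self, msg: ws::Message, ctx: &mut Self::Context) {
        // Echo text frames back to the client; other frame types are ignored here.
        if let ws::Message::Text(text) = msg {
            ctx.text(text);
        }
    }
}

// Perform the websocket handshake and start the actor.
fn ws_index(req: HttpRequest, stream: web::Payload) -> Result<HttpResponse, Error> {
    ws::start(Echo, &req, stream)
}

fn main() {
    let app = App::new().route("/ws/", web::get().to(ws_index));
}
```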
## 0.7.15
* The `' '` character is not percent decoded anymore before matching routes. If you need to use it in
your routes, you should use `%20`.
instead of
```rust
fn main() {
let app = App::new().resource("/my index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/my%20index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
* If you used `AsyncResult::async` you need to replace it with `AsyncResult::future`
## 0.7.4
* `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as a tuple,
even for handlers with one parameter.
## 0.7
* `HttpRequest` does not implement `Stream` anymore. If you need to read the request payload,
use the `HttpMessage::payload()` method.
instead of
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.from_err()
.fold(...)
....
}
```
use `.payload()`
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.payload() // <- get request payload stream
.from_err()
.fold(...)
....
}
```
* [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html)
trait uses `&HttpRequest` instead of `&mut HttpRequest`.
* Removed `Route::with2()` and `Route::with3()`; use a tuple of extractors instead.
instead of
```rust
fn index(query: Query<..>, info: Json<MyStruct>) -> impl Responder {}
```
use tuple of extractors and use `.with()` for registration:
```rust
fn index((query, json): (Query<..>, Json<MyStruct>)) -> impl Responder {}
```
* `Handler::handle()` uses `&self` instead of `&mut self`
* `Handler::handle()` accepts reference to `HttpRequest<_>` instead of value
* Removed deprecated `HttpServer::threads()`, use
[HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead.
* Renamed `client::ClientConnectorError::Connector` to
`client::ClientConnectorError::Resolver`
* `Route::with()` does not return `ExtractorConfig`; to configure an
extractor, use `Route::with_config()`
instead of
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with(index)
.limit(4096); // <- limit size of the payload
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with_config(index, |cfg| { // <- register handler
cfg.limit(4096); // <- limit size of the payload
})
});
}
```
* `Route::with_async()` does not return `ExtractorConfig`; to configure an
extractor, use `Route::with_async_config()`
## 0.6
* `Path<T>` extractor returns `ErrorNotFound` on failure instead of `ErrorBadRequest`
@ -50,7 +384,7 @@
you need to use `use actix_web::ws::WsWriter`
## Migration from 0.4 to 0.5
## 0.5
* `HttpResponseBuilder::body()`, `.finish()`, `.json()`
methods return `HttpResponse` instead of `Result<HttpResponse>`

View File

@ -1,14 +0,0 @@
.PHONY: default build test doc book clean
CARGO_FLAGS := --features "$(FEATURES) alpn"
default: test
build:
cargo build $(CARGO_FLAGS)
test: build clippy
cargo test $(CARGO_FLAGS)
doc: build
cargo doc --no-deps $(CARGO_FLAGS)

View File

@ -1,51 +1,44 @@
# Actix web [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![Build status](https://ci.appveyor.com/api/projects/status/kkdb4yce7qhm5w85/branch/master?svg=true)](https://ci.appveyor.com/project/fafhrd91/actix-web-hdy9d/branch/master) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-web)](https://crates.io/crates/actix-web) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# Actix web [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-web)](https://crates.io/crates/actix-web) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
Actix web is a simple, pragmatic and extremely fast web framework for Rust.
* Supported *HTTP/1.x* and [*HTTP/2.0*](https://actix.rs/docs/http2/) protocols
* Supported *HTTP/1.x* and *HTTP/2.0* protocols
* Streaming and pipelining
* Keep-alive and slow requests handling
* Client/server [WebSockets](https://actix.rs/docs/websockets/) support
* Transparent content compression/decompression (br, gzip, deflate)
* Configurable [request routing](https://actix.rs/docs/url-dispatch/)
* Graceful server shutdown
* Multipart streams
* Static assets
* SSL support with OpenSSL or `native-tls`
* Middlewares ([Logger](https://actix.rs/book/actix-web/sec-9-middlewares.html#logging),
[Session](https://actix.rs/book/actix-web/sec-9-middlewares.html#user-sessions),
[Redis sessions](https://github.com/actix/actix-redis),
[DefaultHeaders](https://actix.rs/book/actix-web/sec-9-middlewares.html#default-headers),
[CORS](https://actix.rs/actix-web/actix_web/middleware/cors/index.html),
[CSRF](https://actix.rs/actix-web/actix_web/middleware/csrf/index.html))
* SSL support with OpenSSL or Rustls
* Middlewares ([Logger, Session, CORS, CSRF, etc](https://actix.rs/docs/middleware/))
* Includes an asynchronous [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
* Built on top of [Actix actor framework](https://github.com/actix/actix)
* Supports [Actix actor framework](https://github.com/actix/actix)
## Documentation & community resources
* [User Guide](https://actix.rs/docs/)
* [API Documentation (Development)](https://actix.rs/actix-web/actix_web/)
* [API Documentation (Releases)](https://docs.rs/actix-web/)
* [API Documentation (1.0)](https://docs.rs/actix-web/)
* [API Documentation (0.7)](https://docs.rs/actix-web/0.7.19/actix_web/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-web](https://crates.io/crates/actix-web)
* Minimum supported Rust version: 1.24 or later
* Minimum supported Rust version: 1.32 or later
## Example
```rust
extern crate actix_web;
use actix_web::{http, server, App, Path, Responder};
use actix_web::{web, App, HttpServer, Responder};
fn index(info: Path<(u32, String)>) -> impl Responder {
fn index(info: web::Path<(u32, String)>) -> impl Responder {
format!("Hello {}! id:{}", info.1, info.0)
}
fn main() {
server::new(
|| App::new()
.route("/{id}/{name}/index.html", http::Method::GET, index))
.bind("127.0.0.1:8080").unwrap()
.run();
fn main() -> std::io::Result<()> {
HttpServer::new(
|| App::new().service(
web::resource("/{id}/{name}/index.html").to(index)))
.bind("127.0.0.1:8080")?
.run()
}
```
@ -53,10 +46,9 @@ fn main() {
* [Basics](https://github.com/actix/examples/tree/master/basics/)
* [Stateful](https://github.com/actix/examples/tree/master/state/)
* [Protobuf support](https://github.com/actix/examples/tree/master/protobuf/)
* [Multipart streams](https://github.com/actix/examples/tree/master/multipart/)
* [Simple websocket](https://github.com/actix/examples/tree/master/websocket/)
* [Tera](https://github.com/actix/examples/tree/master/template_tera/) /
* [Tera](https://github.com/actix/examples/tree/master/template_tera/) /
[Askama](https://github.com/actix/examples/tree/master/template_askama/) templates
* [Diesel integration](https://github.com/actix/examples/tree/master/diesel/)
* [r2d2](https://github.com/actix/examples/tree/master/r2d2/)
@ -69,9 +61,7 @@ You may consider checking out
## Benchmarks
* [TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r15&hw=ph&test=plaintext)
* Some basic benchmarks could be found in this [repository](https://github.com/fafhrd91/benchmarks).
* [TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r16&hw=ph&test=plaintext)
## License

17
actix-files/CHANGES.md Normal file

@ -0,0 +1,17 @@
# Changes
## [0.1.0-alpha.6] - 2019-04-14
* Update actix-web to alpha6
## [0.1.0-alpha.4] - 2019-04-08
* Update actix-web to alpha4
## [0.1.0-alpha.2] - 2019-04-02
* Add default handler support
## [0.1.0-alpha.1] - 2019-03-28
* Initial impl

34
actix-files/Cargo.toml Normal file

@ -0,0 +1,34 @@
[package]
name = "actix-files"
version = "0.1.0-alpha.6"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Static files support for actix web."
readme = "README.md"
keywords = ["actix", "http", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-files/"
categories = ["asynchronous", "web-programming::http-server"]
license = "MIT/Apache-2.0"
edition = "2018"
workspace = ".."
[lib]
name = "actix_files"
path = "src/lib.rs"
[dependencies]
actix-web = "1.0.0-alpha.6"
actix-service = "0.3.4"
bitflags = "1"
bytes = "0.4"
futures = "0.1.25"
derive_more = "0.14"
log = "0.4"
mime = "0.3"
mime_guess = "2.0.0-alpha"
percent-encoding = "1.0"
v_htmlescape = "0.4"
[dev-dependencies]
actix-web = { version = "1.0.0-alpha.6", features=["ssl"] }

1
actix-files/README.md Normal file

@ -0,0 +1 @@
# Static files support for actix web [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-files)](https://crates.io/crates/actix-files) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
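A minimal usage sketch (illustrative; it combines the `NamedFile` API from this
crate with the actix-web 1.0 builder shown in the main README, and the paths are
placeholders):
```rust
use std::io;

use actix_files::NamedFile;
use actix_web::{web, App, HttpServer};

// `NamedFile` implements `Responder`, so it can be returned from a handler;
// `io::Error` converts into an actix-web error automatically.
fn index() -> io::Result<NamedFile> {
    NamedFile::open("static/index.html")
}

fn main() -> io::Result<()> {
    HttpServer::new(|| App::new().service(web::resource("/").to(index)))
        .bind("127.0.0.1:8080")?
        .run()
}
```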

41
actix-files/src/error.rs Normal file

@ -0,0 +1,41 @@
use actix_web::{http::StatusCode, HttpResponse, ResponseError};
use derive_more::Display;
/// Errors which can occur when serving static files.
#[derive(Display, Debug, PartialEq)]
pub enum FilesError {
/// Path is not a directory
#[display(fmt = "Path is not a directory. Unable to serve static files")]
IsNotDirectory,
/// Cannot render directory
#[display(fmt = "Unable to render directory without index file")]
IsDirectory,
}
/// Return `NotFound` for `FilesError`
impl ResponseError for FilesError {
fn error_response(&self) -> HttpResponse {
HttpResponse::new(StatusCode::NOT_FOUND)
}
}
#[derive(Display, Debug, PartialEq)]
pub enum UriSegmentError {
/// The segment started with the wrapped invalid character.
#[display(fmt = "The segment started with the wrapped invalid character")]
BadStart(char),
/// The segment contained the wrapped invalid character.
#[display(fmt = "The segment contained the wrapped invalid character")]
BadChar(char),
/// The segment ended with the wrapped invalid character.
#[display(fmt = "The segment ended with the wrapped invalid character")]
BadEnd(char),
}
/// Return `BadRequest` for `UriSegmentError`
impl ResponseError for UriSegmentError {
fn error_response(&self) -> HttpResponse {
HttpResponse::new(StatusCode::BAD_REQUEST)
}
}

1227
actix-files/src/lib.rs Normal file

File diff suppressed because it is too large

429
actix-files/src/named.rs Normal file

@ -0,0 +1,429 @@
use std::fs::{File, Metadata};
use std::io;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
#[cfg(unix)]
use std::os::unix::fs::MetadataExt;
use bitflags::bitflags;
use mime;
use mime_guess::guess_mime_type;
use actix_web::http::header::{
self, ContentDisposition, DispositionParam, DispositionType,
};
use actix_web::http::{ContentEncoding, Method, StatusCode};
use actix_web::middleware::BodyEncoding;
use actix_web::{Error, HttpMessage, HttpRequest, HttpResponse, Responder};
use crate::range::HttpRange;
use crate::ChunkedReadFile;
bitflags! {
pub(crate) struct Flags: u32 {
const ETAG = 0b00000001;
const LAST_MD = 0b00000010;
}
}
impl Default for Flags {
fn default() -> Self {
Flags::all()
}
}
/// A file with an associated name.
#[derive(Debug)]
pub struct NamedFile {
path: PathBuf,
file: File,
pub(crate) content_type: mime::Mime,
pub(crate) content_disposition: header::ContentDisposition,
pub(crate) md: Metadata,
modified: Option<SystemTime>,
pub(crate) encoding: Option<ContentEncoding>,
pub(crate) status_code: StatusCode,
pub(crate) flags: Flags,
}
impl NamedFile {
/// Creates an instance from a previously opened file.
///
/// The given `path` need not exist and is only used to determine the `ContentType` and
/// `ContentDisposition` headers.
///
/// # Examples
///
/// ```rust
/// use actix_files::NamedFile;
/// use std::io::{self, Write};
/// use std::env;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut file = File::create("foo.txt")?;
/// file.write_all(b"Hello, world!")?;
/// let named_file = NamedFile::from_file(file, "bar.txt")?;
/// Ok(())
/// }
/// ```
pub fn from_file<P: AsRef<Path>>(file: File, path: P) -> io::Result<NamedFile> {
let path = path.as_ref().to_path_buf();
// Get the name of the file and use it to construct default Content-Type
// and Content-Disposition values
let (content_type, content_disposition) = {
let filename = match path.file_name() {
Some(name) => name.to_string_lossy(),
None => {
return Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Provided path has no filename",
));
}
};
let ct = guess_mime_type(&path);
let disposition_type = match ct.type_() {
mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline,
_ => DispositionType::Attachment,
};
let cd = ContentDisposition {
disposition: disposition_type,
parameters: vec![DispositionParam::Filename(filename.into_owned())],
};
(ct, cd)
};
let md = file.metadata()?;
let modified = md.modified().ok();
let encoding = None;
Ok(NamedFile {
path,
file,
content_type,
content_disposition,
md,
modified,
encoding,
status_code: StatusCode::OK,
flags: Flags::default(),
})
}
/// Attempts to open a file in read-only mode.
///
/// # Examples
///
/// ```rust
/// use actix_files::NamedFile;
///
/// let file = NamedFile::open("foo.txt");
/// ```
pub fn open<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
Self::from_file(File::open(&path)?, path)
}
/// Returns reference to the underlying `File` object.
#[inline]
pub fn file(&self) -> &File {
&self.file
}
/// Retrieve the path of this file.
///
/// # Examples
///
/// ```rust
/// # use std::io;
/// use actix_files::NamedFile;
///
/// # fn path() -> io::Result<()> {
/// let file = NamedFile::open("test.txt")?;
/// assert_eq!(file.path().as_os_str(), "test.txt");
/// # Ok(())
/// # }
/// ```
#[inline]
pub fn path(&self) -> &Path {
self.path.as_path()
}
/// Set response **Status Code**
pub fn set_status_code(mut self, status: StatusCode) -> Self {
self.status_code = status;
self
}
/// Set the MIME Content-Type for serving this file. By default
/// the Content-Type is inferred from the filename extension.
#[inline]
pub fn set_content_type(mut self, mime_type: mime::Mime) -> Self {
self.content_type = mime_type;
self
}
/// Set the Content-Disposition for serving this file. This allows
/// changing the inline/attachment disposition as well as the filename
/// sent to the peer. By default the disposition is `inline` for text,
/// image, and video content types, and `attachment` otherwise, and
/// the filename is taken from the path provided in the `open` method
/// after converting it to UTF-8 using
/// [to_string_lossy](https://doc.rust-lang.org/std/ffi/struct.OsStr.html#method.to_string_lossy).
#[inline]
pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self {
self.content_disposition = cd;
self
}
/// Set content encoding for serving this file
#[inline]
pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self {
self.encoding = Some(enc);
self
}
#[inline]
/// Specifies whether to use ETag or not.
///
/// Default is true.
pub fn use_etag(mut self, value: bool) -> Self {
self.flags.set(Flags::ETAG, value);
self
}
#[inline]
/// Specifies whether to use Last-Modified or not.
///
/// Default is true.
pub fn use_last_modified(mut self, value: bool) -> Self {
self.flags.set(Flags::LAST_MD, value);
self
}
pub(crate) fn etag(&self) -> Option<header::EntityTag> {
// This etag format is similar to Apache's.
self.modified.as_ref().map(|mtime| {
let ino = {
#[cfg(unix)]
{
self.md.ino()
}
#[cfg(not(unix))]
{
0
}
};
let dur = mtime
.duration_since(UNIX_EPOCH)
.expect("modification time must be after epoch");
header::EntityTag::strong(format!(
"{:x}:{:x}:{:x}:{:x}",
ino,
self.md.len(),
dur.as_secs(),
dur.subsec_nanos()
))
})
}
pub(crate) fn last_modified(&self) -> Option<header::HttpDate> {
self.modified.map(|mtime| mtime.into())
}
}
impl Deref for NamedFile {
type Target = File;
fn deref(&self) -> &File {
&self.file
}
}
impl DerefMut for NamedFile {
fn deref_mut(&mut self) -> &mut File {
&mut self.file
}
}
/// Returns true if `req` has no `If-Match` header or one which matches `etag`.
fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
match req.get_header::<header::IfMatch>() {
None | Some(header::IfMatch::Any) => true,
Some(header::IfMatch::Items(ref items)) => {
if let Some(some_etag) = etag {
for item in items {
if item.strong_eq(some_etag) {
return true;
}
}
}
false
}
}
}
/// Returns true if `req` doesn't have an `If-None-Match` header matching `etag`.
fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
match req.get_header::<header::IfNoneMatch>() {
Some(header::IfNoneMatch::Any) => false,
Some(header::IfNoneMatch::Items(ref items)) => {
if let Some(some_etag) = etag {
for item in items {
if item.weak_eq(some_etag) {
return false;
}
}
}
true
}
None => true,
}
}
impl Responder for NamedFile {
type Error = Error;
type Future = Result<HttpResponse, Error>;
fn respond_to(self, req: &HttpRequest) -> Self::Future {
if self.status_code != StatusCode::OK {
let mut resp = HttpResponse::build(self.status_code);
resp.set(header::ContentType(self.content_type.clone()))
.header(
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
);
if let Some(current_encoding) = self.encoding {
resp.encoding(current_encoding);
}
let reader = ChunkedReadFile {
size: self.md.len(),
offset: 0,
file: Some(self.file),
fut: None,
counter: 0,
};
return Ok(resp.streaming(reader));
}
match req.method() {
&Method::HEAD | &Method::GET => (),
_ => {
return Ok(HttpResponse::MethodNotAllowed()
.header(header::CONTENT_TYPE, "text/plain")
.header(header::ALLOW, "GET, HEAD")
.body("This resource only supports GET and HEAD."));
}
}
let etag = if self.flags.contains(Flags::ETAG) {
self.etag()
} else {
None
};
let last_modified = if self.flags.contains(Flags::LAST_MD) {
self.last_modified()
} else {
None
};
// check preconditions
let precondition_failed = if !any_match(etag.as_ref(), req) {
true
} else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) =
(last_modified, req.get_header())
{
m > since
} else {
false
};
// check last modified
let not_modified = if !none_match(etag.as_ref(), req) {
true
} else if req.headers().contains_key(&header::IF_NONE_MATCH) {
false
} else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) =
(last_modified, req.get_header())
{
m <= since
} else {
false
};
let mut resp = HttpResponse::build(self.status_code);
resp.set(header::ContentType(self.content_type.clone()))
.header(
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
);
// default compressing
if let Some(current_encoding) = self.encoding {
resp.encoding(current_encoding);
}
resp.if_some(last_modified, |lm, resp| {
resp.set(header::LastModified(lm));
})
.if_some(etag, |etag, resp| {
resp.set(header::ETag(etag));
});
resp.header(header::ACCEPT_RANGES, "bytes");
let mut length = self.md.len();
let mut offset = 0;
// check for range header
if let Some(ranges) = req.headers().get(&header::RANGE) {
if let Ok(rangesheader) = ranges.to_str() {
if let Ok(rangesvec) = HttpRange::parse(rangesheader, length) {
length = rangesvec[0].length;
offset = rangesvec[0].start;
resp.encoding(ContentEncoding::Identity);
resp.header(
header::CONTENT_RANGE,
format!(
"bytes {}-{}/{}",
offset,
offset + length - 1,
self.md.len()
),
);
} else {
resp.header(header::CONTENT_RANGE, format!("bytes */{}", length));
return Ok(resp.status(StatusCode::RANGE_NOT_SATISFIABLE).finish());
};
} else {
return Ok(resp.status(StatusCode::BAD_REQUEST).finish());
};
};
resp.header(header::CONTENT_LENGTH, format!("{}", length));
if precondition_failed {
return Ok(resp.status(StatusCode::PRECONDITION_FAILED).finish());
} else if not_modified {
return Ok(resp.status(StatusCode::NOT_MODIFIED).finish());
}
if *req.method() == Method::HEAD {
Ok(resp.finish())
} else {
let reader = ChunkedReadFile {
offset,
size: length,
file: Some(self.file),
fut: None,
counter: 0,
};
if offset != 0 || length != self.md.len() {
return Ok(resp.status(StatusCode::PARTIAL_CONTENT).streaming(reader));
};
Ok(resp.streaming(reader))
}
}
}

375
actix-files/src/range.rs Normal file

@ -0,0 +1,375 @@
/// HTTP Range header representation.
#[derive(Debug, Clone, Copy)]
pub struct HttpRange {
pub start: u64,
pub length: u64,
}
static PREFIX: &'static str = "bytes=";
const PREFIX_LEN: usize = 6;
impl HttpRange {
/// Parses Range HTTP header string as per RFC 2616.
///
/// `header` is the HTTP Range header (e.g. `bytes=0-9`).
/// `size` is full size of response (file).
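///
/// # Example
///
/// An illustrative case matching the tests below: a single absolute range
/// over a 10-byte resource.
///
/// ```ignore
/// let ranges = HttpRange::parse("bytes=0-9", 10).unwrap();
/// assert_eq!(ranges[0].start, 0);
/// assert_eq!(ranges[0].length, 10);
/// ```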
pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ()> {
if header.is_empty() {
return Ok(Vec::new());
}
if !header.starts_with(PREFIX) {
return Err(());
}
let size_sig = size as i64;
let mut no_overlap = false;
let all_ranges: Vec<Option<HttpRange>> = header[PREFIX_LEN..]
.split(',')
.map(|x| x.trim())
.filter(|x| !x.is_empty())
.map(|ra| {
let mut start_end_iter = ra.split('-');
let start_str = start_end_iter.next().ok_or(())?.trim();
let end_str = start_end_iter.next().ok_or(())?.trim();
if start_str.is_empty() {
// If no start is specified, end specifies the
// range start relative to the end of the file.
let mut length: i64 = end_str.parse().map_err(|_| ())?;
if length > size_sig {
length = size_sig;
}
Ok(Some(HttpRange {
start: (size_sig - length) as u64,
length: length as u64,
}))
} else {
let start: i64 = start_str.parse().map_err(|_| ())?;
if start < 0 {
return Err(());
}
if start >= size_sig {
no_overlap = true;
return Ok(None);
}
let length = if end_str.is_empty() {
// If no end is specified, range extends to end of the file.
size_sig - start
} else {
let mut end: i64 = end_str.parse().map_err(|_| ())?;
if start > end {
return Err(());
}
if end >= size_sig {
end = size_sig - 1;
}
end - start + 1
};
Ok(Some(HttpRange {
start: start as u64,
length: length as u64,
}))
}
})
.collect::<Result<_, _>>()?;
let ranges: Vec<HttpRange> = all_ranges.into_iter().filter_map(|x| x).collect();
if no_overlap && ranges.is_empty() {
return Err(());
}
Ok(ranges)
}
}
#[cfg(test)]
mod tests {
use super::*;
struct T(&'static str, u64, Vec<HttpRange>);
#[test]
fn test_parse() {
let tests = vec![
T("", 0, vec![]),
T("", 1000, vec![]),
T("foo", 0, vec![]),
T("bytes=", 0, vec![]),
T("bytes=7", 10, vec![]),
T("bytes= 7 ", 10, vec![]),
T("bytes=1-", 0, vec![]),
T("bytes=5-4", 10, vec![]),
T("bytes=0-2,5-4", 10, vec![]),
T("bytes=2-5,4-3", 10, vec![]),
T("bytes=--5,4--3", 10, vec![]),
T("bytes=A-", 10, vec![]),
T("bytes=A- ", 10, vec![]),
T("bytes=A-Z", 10, vec![]),
T("bytes= -Z", 10, vec![]),
T("bytes=5-Z", 10, vec![]),
T("bytes=Ran-dom, garbage", 10, vec![]),
T("bytes=0x01-0x02", 10, vec![]),
T("bytes= ", 10, vec![]),
T("bytes= , , , ", 10, vec![]),
T(
"bytes=0-9",
10,
vec![HttpRange {
start: 0,
length: 10,
}],
),
T(
"bytes=0-",
10,
vec![HttpRange {
start: 0,
length: 10,
}],
),
T(
"bytes=5-",
10,
vec![HttpRange {
start: 5,
length: 5,
}],
),
T(
"bytes=0-20",
10,
vec![HttpRange {
start: 0,
length: 10,
}],
),
T(
"bytes=15-,0-5",
10,
vec![HttpRange {
start: 0,
length: 6,
}],
),
T(
"bytes=1-2,5-",
10,
vec![
HttpRange {
start: 1,
length: 2,
},
HttpRange {
start: 5,
length: 5,
},
],
),
T(
"bytes=-2 , 7-",
11,
vec![
HttpRange {
start: 9,
length: 2,
},
HttpRange {
start: 7,
length: 4,
},
],
),
T(
"bytes=0-0 ,2-2, 7-",
11,
vec![
HttpRange {
start: 0,
length: 1,
},
HttpRange {
start: 2,
length: 1,
},
HttpRange {
start: 7,
length: 4,
},
],
),
T(
"bytes=-5",
10,
vec![HttpRange {
start: 5,
length: 5,
}],
),
T(
"bytes=-15",
10,
vec![HttpRange {
start: 0,
length: 10,
}],
),
T(
"bytes=0-499",
10000,
vec![HttpRange {
start: 0,
length: 500,
}],
),
T(
"bytes=500-999",
10000,
vec![HttpRange {
start: 500,
length: 500,
}],
),
T(
"bytes=-500",
10000,
vec![HttpRange {
start: 9500,
length: 500,
}],
),
T(
"bytes=9500-",
10000,
vec![HttpRange {
start: 9500,
length: 500,
}],
),
T(
"bytes=0-0,-1",
10000,
vec![
HttpRange {
start: 0,
length: 1,
},
HttpRange {
start: 9999,
length: 1,
},
],
),
T(
"bytes=500-600,601-999",
10000,
vec![
HttpRange {
start: 500,
length: 101,
},
HttpRange {
start: 601,
length: 399,
},
],
),
T(
"bytes=500-700,601-999",
10000,
vec![
HttpRange {
start: 500,
length: 201,
},
HttpRange {
start: 601,
length: 399,
},
],
),
// Match Apache laxity:
T(
"bytes= 1 -2 , 4- 5, 7 - 8 , ,,",
11,
vec![
HttpRange {
start: 1,
length: 2,
},
HttpRange {
start: 4,
length: 2,
},
HttpRange {
start: 7,
length: 2,
},
],
),
];
for t in tests {
let header = t.0;
let size = t.1;
let expected = t.2;
let res = HttpRange::parse(header, size);
if res.is_err() {
if expected.is_empty() {
continue;
} else {
assert!(
false,
"parse({}, {}) returned error {:?}",
header,
size,
res.unwrap_err()
);
}
}
let got = res.unwrap();
if got.len() != expected.len() {
assert!(
false,
"len(parseRange({}, {})) = {}, want {}",
header,
size,
got.len(),
expected.len()
);
continue;
}
for i in 0..expected.len() {
if got[i].start != expected[i].start {
assert!(
false,
"parseRange({}, {})[{}].start = {}, want {}",
header, size, i, got[i].start, expected[i].start
)
}
if got[i].length != expected[i].length {
assert!(
false,
"parseRange({}, {})[{}].length = {}, want {}",
header, size, i, got[i].length, expected[i].length
)
}
}
}
}
}

37
actix-framed/Cargo.toml Normal file

@ -0,0 +1,37 @@
[package]
name = "actix-framed"
version = "0.1.0-alpha.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix framed app server"
readme = "README.md"
keywords = ["http", "web", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-framed/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
license = "MIT/Apache-2.0"
edition = "2018"
workspace =".."
[lib]
name = "actix_framed"
path = "src/lib.rs"
[dependencies]
actix-codec = "0.1.2"
actix-service = "0.3.6"
actix-utils = "0.3.4"
actix-router = "0.1.2"
actix-rt = "0.2.2"
actix-http = "0.1.0-alpha.5"
bytes = "0.4"
futures = "0.1.25"
log = "0.4"
[dev-dependencies]
actix-server = { version = "0.4.1", features=["ssl"] }
actix-connect = { version = "0.1.4", features=["ssl"] }
actix-http-test = { version = "0.1.0-alpha.3", features=["ssl"] }

201
actix-framed/LICENSE-APACHE Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-NOW Nikolay Kim
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

25
actix-framed/LICENSE-MIT Normal file

@ -0,0 +1,25 @@
Copyright (c) 2017 Nikolay Kim
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

1
actix-framed/README.md Normal file

@ -0,0 +1 @@
# Framed app for actix web [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-framed)](https://crates.io/crates/actix-framed) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)

5
actix-framed/changes.md Normal file

@ -0,0 +1,5 @@
# Changes
## [0.1.0-alpha.1] - 2019-04-12
* Initial release

215
actix-framed/src/app.rs Normal file

@ -0,0 +1,215 @@
use std::rc::Rc;
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_http::h1::{Codec, SendResponse};
use actix_http::{Error, Request, Response};
use actix_router::{Path, Router, Url};
use actix_service::{IntoNewService, NewService, Service};
use actix_utils::cloneable::CloneableService;
use futures::{Async, Future, Poll};
use crate::helpers::{BoxedHttpNewService, BoxedHttpService, HttpNewService};
use crate::request::FramedRequest;
use crate::state::State;
type BoxedResponse = Box<Future<Item = (), Error = Error>>;
pub trait HttpServiceFactory {
type Factory: NewService;
fn path(&self) -> &str;
fn create(self) -> Self::Factory;
}
/// Application builder
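///
/// An illustrative sketch (mirroring this crate's tests; `ws_service` is a
/// placeholder for a handler that takes a `FramedRequest` and returns a future
/// resolving to `()`):
///
/// ```ignore
/// let app = FramedApp::new()
///     .service(FramedRoute::get("/index.html").to(ws_service));
/// ```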
pub struct FramedApp<T, S = ()> {
state: State<S>,
services: Vec<(String, BoxedHttpNewService<FramedRequest<T, S>>)>,
}
impl<T: 'static> FramedApp<T, ()> {
pub fn new() -> Self {
FramedApp {
state: State::new(()),
services: Vec::new(),
}
}
}
impl<T: 'static, S: 'static> FramedApp<T, S> {
pub fn with(state: S) -> FramedApp<T, S> {
FramedApp {
services: Vec::new(),
state: State::new(state),
}
}
pub fn service<U>(mut self, factory: U) -> Self
where
U: HttpServiceFactory,
U::Factory: NewService<
Request = FramedRequest<T, S>,
Response = (),
Error = Error,
InitError = (),
> + 'static,
<U::Factory as NewService>::Future: 'static,
<U::Factory as NewService>::Service: Service<
Request = FramedRequest<T, S>,
Response = (),
Error = Error,
Future = Box<Future<Item = (), Error = Error>>,
>,
{
let path = factory.path().to_string();
self.services
.push((path, Box::new(HttpNewService::new(factory.create()))));
self
}
}
impl<T, S> IntoNewService<FramedAppFactory<T, S>> for FramedApp<T, S>
where
T: AsyncRead + AsyncWrite + 'static,
S: 'static,
{
fn into_new_service(self) -> FramedAppFactory<T, S> {
FramedAppFactory {
state: self.state,
services: Rc::new(self.services),
}
}
}
#[derive(Clone)]
pub struct FramedAppFactory<T, S> {
state: State<S>,
services: Rc<Vec<(String, BoxedHttpNewService<FramedRequest<T, S>>)>>,
}
impl<T, S, C> NewService<C> for FramedAppFactory<T, S>
where
T: AsyncRead + AsyncWrite + 'static,
S: 'static,
{
type Request = (Request, Framed<T, Codec>);
type Response = ();
type Error = Error;
type InitError = ();
type Service = CloneableService<FramedAppService<T, S>>;
type Future = CreateService<T, S>;
fn new_service(&self, _: &C) -> Self::Future {
CreateService {
fut: self
.services
.iter()
.map(|(path, service)| {
CreateServiceItem::Future(
Some(path.clone()),
service.new_service(&()),
)
})
.collect(),
state: self.state.clone(),
}
}
}
#[doc(hidden)]
pub struct CreateService<T, S> {
fut: Vec<CreateServiceItem<T, S>>,
state: State<S>,
}
enum CreateServiceItem<T, S> {
Future(
Option<String>,
Box<Future<Item = BoxedHttpService<FramedRequest<T, S>>, Error = ()>>,
),
Service(String, BoxedHttpService<FramedRequest<T, S>>),
}
impl<S: 'static, T: 'static> Future for CreateService<T, S>
where
T: AsyncRead + AsyncWrite,
{
type Item = CloneableService<FramedAppService<T, S>>;
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let mut done = true;
// poll http services
for item in &mut self.fut {
let res = match item {
CreateServiceItem::Future(ref mut path, ref mut fut) => {
match fut.poll()? {
Async::Ready(service) => Some((path.take().unwrap(), service)),
Async::NotReady => {
done = false;
None
}
}
}
CreateServiceItem::Service(_, _) => continue,
};
if let Some((path, service)) = res {
*item = CreateServiceItem::Service(path, service);
}
}
if done {
let router = self
.fut
.drain(..)
.fold(Router::build(), |mut router, item| {
match item {
CreateServiceItem::Service(path, service) => {
router.path(&path, service);
}
CreateServiceItem::Future(_, _) => unreachable!(),
}
router
});
Ok(Async::Ready(CloneableService::new(FramedAppService {
router: router.finish(),
state: self.state.clone(),
})))
} else {
Ok(Async::NotReady)
}
}
}
pub struct FramedAppService<T, S> {
state: State<S>,
router: Router<BoxedHttpService<FramedRequest<T, S>>>,
}
impl<S: 'static, T: 'static> Service for FramedAppService<T, S>
where
T: AsyncRead + AsyncWrite,
{
type Request = (Request, Framed<T, Codec>);
type Response = ();
type Error = Error;
type Future = BoxedResponse;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, (req, framed): (Request, Framed<T, Codec>)) -> Self::Future {
let mut path = Path::new(Url::new(req.uri().clone()));
if let Some((srv, _info)) = self.router.recognize_mut(&mut path) {
return srv.call(FramedRequest::new(req, framed, path, self.state.clone()));
}
Box::new(
SendResponse::new(framed, Response::NotFound().finish()).then(|_| Ok(())),
)
}
}


@ -0,0 +1,88 @@
use actix_http::Error;
use actix_service::{NewService, Service};
use futures::{Future, Poll};
pub(crate) type BoxedHttpService<Req> = Box<
Service<
Request = Req,
Response = (),
Error = Error,
Future = Box<Future<Item = (), Error = Error>>,
>,
>;
pub(crate) type BoxedHttpNewService<Req> = Box<
NewService<
Request = Req,
Response = (),
Error = Error,
InitError = (),
Service = BoxedHttpService<Req>,
Future = Box<Future<Item = BoxedHttpService<Req>, Error = ()>>,
>,
>;
pub(crate) struct HttpNewService<T: NewService>(T);
impl<T> HttpNewService<T>
where
T: NewService<Response = (), Error = Error>,
T::Response: 'static,
T::Future: 'static,
T::Service: Service<Future = Box<Future<Item = (), Error = Error>>> + 'static,
<T::Service as Service>::Future: 'static,
{
pub fn new(service: T) -> Self {
HttpNewService(service)
}
}
impl<T> NewService for HttpNewService<T>
where
T: NewService<Response = (), Error = Error>,
T::Request: 'static,
T::Future: 'static,
T::Service: Service<Future = Box<Future<Item = (), Error = Error>>> + 'static,
<T::Service as Service>::Future: 'static,
{
type Request = T::Request;
type Response = ();
type Error = Error;
type InitError = ();
type Service = BoxedHttpService<T::Request>;
type Future = Box<Future<Item = Self::Service, Error = ()>>;
fn new_service(&self, _: &()) -> Self::Future {
Box::new(self.0.new_service(&()).map_err(|_| ()).and_then(|service| {
let service: BoxedHttpService<_> = Box::new(HttpServiceWrapper { service });
Ok(service)
}))
}
}
struct HttpServiceWrapper<T: Service> {
service: T,
}
impl<T> Service for HttpServiceWrapper<T>
where
T: Service<
Response = (),
Future = Box<Future<Item = (), Error = Error>>,
Error = Error,
>,
T::Request: 'static,
{
type Request = T::Request;
type Response = ();
type Error = Error;
type Future = Box<Future<Item = (), Error = Error>>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.service.poll_ready()
}
fn call(&mut self, req: Self::Request) -> Self::Future {
self.service.call(req)
}
}

16
actix-framed/src/lib.rs Normal file

@ -0,0 +1,16 @@
mod app;
mod helpers;
mod request;
mod route;
mod service;
mod state;
pub mod test;
// re-export for convenience
pub use actix_http::{http, Error, HttpMessage, Response, ResponseError};
pub use self::app::{FramedApp, FramedAppService};
pub use self::request::FramedRequest;
pub use self::route::FramedRoute;
pub use self::service::{SendError, VerifyWebSockets};
pub use self::state::State;

170
actix-framed/src/request.rs Normal file

@ -0,0 +1,170 @@
use std::cell::{Ref, RefMut};
use actix_codec::Framed;
use actix_http::http::{HeaderMap, Method, Uri, Version};
use actix_http::{h1::Codec, Extensions, Request, RequestHead};
use actix_router::{Path, Url};
use crate::state::State;
pub struct FramedRequest<Io, S = ()> {
req: Request,
framed: Framed<Io, Codec>,
state: State<S>,
pub(crate) path: Path<Url>,
}
impl<Io, S> FramedRequest<Io, S> {
pub fn new(
req: Request,
framed: Framed<Io, Codec>,
path: Path<Url>,
state: State<S>,
) -> Self {
Self {
req,
framed,
state,
path,
}
}
}
impl<Io, S> FramedRequest<Io, S> {
/// Split the request into its parts
pub fn into_parts(self) -> (Request, Framed<Io, Codec>, State<S>) {
(self.req, self.framed, self.state)
}
/// This method returns a reference to the request head
#[inline]
pub fn head(&self) -> &RequestHead {
self.req.head()
}
/// This method returns a mutable reference to the request head.
/// Panics if multiple references to the http request exist.
#[inline]
pub fn head_mut(&mut self) -> &mut RequestHead {
self.req.head_mut()
}
/// Shared application state
#[inline]
pub fn state(&self) -> &S {
self.state.get_ref()
}
/// Request's uri.
#[inline]
pub fn uri(&self) -> &Uri {
&self.head().uri
}
/// Read the Request method.
#[inline]
pub fn method(&self) -> &Method {
&self.head().method
}
/// Read the Request Version.
#[inline]
pub fn version(&self) -> Version {
self.head().version
}
#[inline]
/// Returns request's headers.
pub fn headers(&self) -> &HeaderMap {
&self.head().headers
}
/// The target path of this Request.
#[inline]
pub fn path(&self) -> &str {
self.head().uri.path()
}
/// The query string in the URL.
///
/// E.g., id=10
#[inline]
pub fn query_string(&self) -> &str {
if let Some(query) = self.uri().query().as_ref() {
query
} else {
""
}
}
/// Get a reference to the Path parameters.
///
/// Params is a container for url parameters.
/// A variable segment is specified in the form `{identifier}`,
/// where the identifier can be used later in a request handler to
/// access the matched value for that segment.
#[inline]
pub fn match_info(&self) -> &Path<Url> {
&self.path
}
/// Request extensions
#[inline]
pub fn extensions(&self) -> Ref<Extensions> {
self.head().extensions()
}
/// Mutable reference to the request's extensions
#[inline]
pub fn extensions_mut(&self) -> RefMut<Extensions> {
self.head().extensions_mut()
}
}
#[cfg(test)]
mod tests {
use actix_http::http::{HeaderName, HeaderValue, HttpTryFrom};
use actix_http::test::{TestBuffer, TestRequest};
use super::*;
#[test]
fn test_reqest() {
let buf = TestBuffer::empty();
let framed = Framed::new(buf, Codec::default());
let req = TestRequest::with_uri("/index.html?q=1")
.header("content-type", "test")
.finish();
let path = Path::new(Url::new(req.uri().clone()));
let mut freq = FramedRequest::new(req, framed, path, State::new(10u8));
assert_eq!(*freq.state(), 10);
assert_eq!(freq.version(), Version::HTTP_11);
assert_eq!(freq.method(), Method::GET);
assert_eq!(freq.path(), "/index.html");
assert_eq!(freq.query_string(), "q=1");
assert_eq!(
freq.headers()
.get("content-type")
.unwrap()
.to_str()
.unwrap(),
"test"
);
freq.head_mut().headers.insert(
HeaderName::try_from("x-hdr").unwrap(),
HeaderValue::from_static("test"),
);
assert_eq!(
freq.headers().get("x-hdr").unwrap().to_str().unwrap(),
"test"
);
freq.extensions_mut().insert(100usize);
assert_eq!(*freq.extensions().get::<usize>().unwrap(), 100usize);
let (_, _, state) = freq.into_parts();
assert_eq!(*state, 10);
}
}

156
actix-framed/src/route.rs Normal file

@ -0,0 +1,156 @@
use std::fmt;
use std::marker::PhantomData;
use actix_codec::{AsyncRead, AsyncWrite};
use actix_http::{http::Method, Error};
use actix_service::{NewService, Service};
use futures::future::{ok, FutureResult};
use futures::{Async, Future, IntoFuture, Poll};
use log::error;
use crate::app::HttpServiceFactory;
use crate::request::FramedRequest;
/// Resource route definition
///
/// A route uses a builder-like pattern for configuration.
/// If a handler is not explicitly set, a default *404 Not Found* handler is used.
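///
/// ```ignore
/// // Illustrative sketch (mirrors this crate's tests): route GET /index.html
/// // to `ws_service`, a placeholder handler taking a `FramedRequest` and
/// // returning a future that resolves to `()`.
/// let route = FramedRoute::get("/index.html").to(ws_service);
/// ```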
pub struct FramedRoute<Io, S, F = (), R = ()> {
handler: F,
pattern: String,
methods: Vec<Method>,
state: PhantomData<(Io, S, R)>,
}
impl<Io, S> FramedRoute<Io, S> {
pub fn new(pattern: &str) -> Self {
FramedRoute {
handler: (),
pattern: pattern.to_string(),
methods: Vec::new(),
state: PhantomData,
}
}
pub fn get(path: &str) -> FramedRoute<Io, S> {
FramedRoute::new(path).method(Method::GET)
}
pub fn post(path: &str) -> FramedRoute<Io, S> {
FramedRoute::new(path).method(Method::POST)
}
pub fn put(path: &str) -> FramedRoute<Io, S> {
FramedRoute::new(path).method(Method::PUT)
}
pub fn delete(path: &str) -> FramedRoute<Io, S> {
FramedRoute::new(path).method(Method::DELETE)
}
pub fn method(mut self, method: Method) -> Self {
self.methods.push(method);
self
}
pub fn to<F, R>(self, handler: F) -> FramedRoute<Io, S, F, R>
where
F: FnMut(FramedRequest<Io, S>) -> R,
R: IntoFuture<Item = ()>,
R::Future: 'static,
R::Error: fmt::Debug,
{
FramedRoute {
handler,
pattern: self.pattern,
methods: self.methods,
state: PhantomData,
}
}
}
impl<Io, S, F, R> HttpServiceFactory for FramedRoute<Io, S, F, R>
where
Io: AsyncRead + AsyncWrite + 'static,
F: FnMut(FramedRequest<Io, S>) -> R + Clone,
R: IntoFuture<Item = ()>,
R::Future: 'static,
R::Error: fmt::Display,
{
type Factory = FramedRouteFactory<Io, S, F, R>;
fn path(&self) -> &str {
&self.pattern
}
fn create(self) -> Self::Factory {
FramedRouteFactory {
handler: self.handler,
methods: self.methods,
_t: PhantomData,
}
}
}
pub struct FramedRouteFactory<Io, S, F, R> {
handler: F,
methods: Vec<Method>,
_t: PhantomData<(Io, S, R)>,
}
impl<Io, S, F, R> NewService for FramedRouteFactory<Io, S, F, R>
where
Io: AsyncRead + AsyncWrite + 'static,
F: FnMut(FramedRequest<Io, S>) -> R + Clone,
R: IntoFuture<Item = ()>,
R::Future: 'static,
R::Error: fmt::Display,
{
type Request = FramedRequest<Io, S>;
type Response = ();
type Error = Error;
type InitError = ();
type Service = FramedRouteService<Io, S, F, R>;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, _: &()) -> Self::Future {
ok(FramedRouteService {
handler: self.handler.clone(),
methods: self.methods.clone(),
_t: PhantomData,
})
}
}
pub struct FramedRouteService<Io, S, F, R> {
handler: F,
methods: Vec<Method>,
_t: PhantomData<(Io, S, R)>,
}
impl<Io, S, F, R> Service for FramedRouteService<Io, S, F, R>
where
Io: AsyncRead + AsyncWrite + 'static,
F: FnMut(FramedRequest<Io, S>) -> R + Clone,
R: IntoFuture<Item = ()>,
R::Future: 'static,
R::Error: fmt::Display,
{
type Request = FramedRequest<Io, S>;
type Response = ();
type Error = Error;
type Future = Box<Future<Item = (), Error = Error>>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: FramedRequest<Io, S>) -> Self::Future {
Box::new((self.handler)(req).into_future().then(|res| {
if let Err(e) = res {
error!("Error in request handler: {}", e);
}
Ok(())
}))
}
}

147
actix-framed/src/service.rs Normal file

@ -0,0 +1,147 @@
use std::marker::PhantomData;
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_http::body::BodySize;
use actix_http::error::ResponseError;
use actix_http::h1::{Codec, Message};
use actix_http::ws::{verify_handshake, HandshakeError};
use actix_http::{Request, Response};
use actix_service::{NewService, Service};
use futures::future::{ok, Either, FutureResult};
use futures::{Async, Future, IntoFuture, Poll, Sink};
/// Service that verifies that an incoming request is a valid websocket
/// upgrade request. In case of error it returns a `HandshakeError`.
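///
/// ```ignore
/// // Illustrative sketch (mirrors this crate's tests): check the upgrade
/// // before the framed app runs; handshake errors are turned into HTTP
/// // error responses by the `SendError` service below.
/// let svc = VerifyWebSockets::default().then(SendError::default());
/// ```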
pub struct VerifyWebSockets<T> {
_t: PhantomData<T>,
}
impl<T> Default for VerifyWebSockets<T> {
fn default() -> Self {
VerifyWebSockets { _t: PhantomData }
}
}
impl<T, C> NewService<C> for VerifyWebSockets<T> {
type Request = (Request, Framed<T, Codec>);
type Response = (Request, Framed<T, Codec>);
type Error = (HandshakeError, Framed<T, Codec>);
type InitError = ();
type Service = VerifyWebSockets<T>;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, _: &C) -> Self::Future {
ok(VerifyWebSockets { _t: PhantomData })
}
}
impl<T> Service for VerifyWebSockets<T> {
type Request = (Request, Framed<T, Codec>);
type Response = (Request, Framed<T, Codec>);
type Error = (HandshakeError, Framed<T, Codec>);
type Future = FutureResult<Self::Response, Self::Error>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, (req, framed): (Request, Framed<T, Codec>)) -> Self::Future {
match verify_handshake(req.head()) {
Err(e) => Err((e, framed)).into_future(),
Ok(_) => Ok((req, framed)).into_future(),
}
}
}
/// Send http/1 error response
pub struct SendError<T, R, E>(PhantomData<(T, R, E)>);
impl<T, R, E> Default for SendError<T, R, E>
where
T: AsyncRead + AsyncWrite,
E: ResponseError,
{
fn default() -> Self {
SendError(PhantomData)
}
}
impl<T, R, E, C> NewService<C> for SendError<T, R, E>
where
T: AsyncRead + AsyncWrite + 'static,
R: 'static,
E: ResponseError + 'static,
{
type Request = Result<R, (E, Framed<T, Codec>)>;
type Response = R;
type Error = (E, Framed<T, Codec>);
type InitError = ();
type Service = SendError<T, R, E>;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, _: &C) -> Self::Future {
ok(SendError(PhantomData))
}
}
impl<T, R, E> Service for SendError<T, R, E>
where
T: AsyncRead + AsyncWrite + 'static,
R: 'static,
E: ResponseError + 'static,
{
type Request = Result<R, (E, Framed<T, Codec>)>;
type Response = R;
type Error = (E, Framed<T, Codec>);
type Future = Either<FutureResult<R, (E, Framed<T, Codec>)>, SendErrorFut<T, R, E>>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: Result<R, (E, Framed<T, Codec>)>) -> Self::Future {
match req {
Ok(r) => Either::A(ok(r)),
Err((e, framed)) => {
let res = e.error_response().drop_body();
Either::B(SendErrorFut {
framed: Some(framed),
res: Some((res, BodySize::Empty).into()),
err: Some(e),
_t: PhantomData,
})
}
}
}
}
pub struct SendErrorFut<T, R, E> {
res: Option<Message<(Response<()>, BodySize)>>,
framed: Option<Framed<T, Codec>>,
err: Option<E>,
_t: PhantomData<R>,
}
impl<T, R, E> Future for SendErrorFut<T, R, E>
where
E: ResponseError,
T: AsyncRead + AsyncWrite,
{
type Item = R;
type Error = (E, Framed<T, Codec>);
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(res) = self.res.take() {
if self.framed.as_mut().unwrap().force_send(res).is_err() {
return Err((self.err.take().unwrap(), self.framed.take().unwrap()));
}
}
match self.framed.as_mut().unwrap().poll_complete() {
Ok(Async::Ready(_)) => {
Err((self.err.take().unwrap(), self.framed.take().unwrap()))
}
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(_) => Err((self.err.take().unwrap(), self.framed.take().unwrap())),
}
}
}

29
actix-framed/src/state.rs Normal file

@ -0,0 +1,29 @@
use std::ops::Deref;
use std::sync::Arc;
/// Application state
pub struct State<S>(Arc<S>);
impl<S> State<S> {
pub fn new(state: S) -> State<S> {
State(Arc::new(state))
}
pub fn get_ref(&self) -> &S {
self.0.as_ref()
}
}
impl<S> Deref for State<S> {
type Target = S;
fn deref(&self) -> &S {
self.0.as_ref()
}
}
impl<S> Clone for State<S> {
fn clone(&self) -> State<S> {
State(self.0.clone())
}
}

153
actix-framed/src/test.rs Normal file

@ -0,0 +1,153 @@
//! Various helpers for Actix applications to use during testing.
use actix_codec::Framed;
use actix_http::h1::Codec;
use actix_http::http::header::{Header, HeaderName, IntoHeaderValue};
use actix_http::http::{HttpTryFrom, Method, Uri, Version};
use actix_http::test::{TestBuffer, TestRequest as HttpTestRequest};
use actix_router::{Path, Url};
use actix_rt::Runtime;
use futures::IntoFuture;
use crate::{FramedRequest, State};
/// Test `Request` builder.
pub struct TestRequest<S = ()> {
req: HttpTestRequest,
path: Path<Url>,
state: State<S>,
}
impl Default for TestRequest<()> {
fn default() -> TestRequest {
TestRequest {
req: HttpTestRequest::default(),
path: Path::new(Url::new(Uri::default())),
state: State::new(()),
}
}
}
impl TestRequest<()> {
/// Create TestRequest and set request uri
pub fn with_uri(path: &str) -> Self {
Self::get().uri(path)
}
/// Create TestRequest and set header
pub fn with_hdr<H: Header>(hdr: H) -> Self {
Self::default().set(hdr)
}
/// Create TestRequest and set header
pub fn with_header<K, V>(key: K, value: V) -> Self
where
HeaderName: HttpTryFrom<K>,
V: IntoHeaderValue,
{
Self::default().header(key, value)
}
/// Create TestRequest and set method to `Method::GET`
pub fn get() -> Self {
Self::default().method(Method::GET)
}
/// Create TestRequest and set method to `Method::POST`
pub fn post() -> Self {
Self::default().method(Method::POST)
}
}
impl<S> TestRequest<S> {
/// Create TestRequest and set request uri
pub fn with_state(state: S) -> TestRequest<S> {
let req = TestRequest::get();
TestRequest {
state: State::new(state),
req: req.req,
path: req.path,
}
}
/// Set HTTP version of this request
pub fn version(mut self, ver: Version) -> Self {
self.req.version(ver);
self
}
/// Set HTTP method of this request
pub fn method(mut self, meth: Method) -> Self {
self.req.method(meth);
self
}
/// Set HTTP Uri of this request
pub fn uri(mut self, path: &str) -> Self {
self.req.uri(path);
self
}
/// Set a header
pub fn set<H: Header>(mut self, hdr: H) -> Self {
self.req.set(hdr);
self
}
/// Set a header
pub fn header<K, V>(mut self, key: K, value: V) -> Self
where
HeaderName: HttpTryFrom<K>,
V: IntoHeaderValue,
{
self.req.header(key, value);
self
}
/// Set request path pattern parameter
pub fn param(mut self, name: &'static str, value: &'static str) -> Self {
self.path.add_static(name, value);
self
}
/// Complete request creation and generate `Request` instance
pub fn finish(mut self) -> FramedRequest<TestBuffer, S> {
let req = self.req.finish();
self.path.get_mut().update(req.uri());
let framed = Framed::new(TestBuffer::empty(), Codec::default());
FramedRequest::new(req, framed, self.path, self.state)
}
/// This method generates `FramedRequest` instance and executes async handler
pub fn run<F, R, I, E>(self, f: F) -> Result<I, E>
where
F: FnOnce(FramedRequest<TestBuffer, S>) -> R,
R: IntoFuture<Item = I, Error = E>,
{
let mut rt = Runtime::new().unwrap();
rt.block_on(f(self.finish()).into_future())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test() {
let req = TestRequest::with_uri("/index.html")
.header("x-test", "test")
.param("test", "123")
.finish();
assert_eq!(*req.state(), ());
assert_eq!(req.version(), Version::HTTP_11);
assert_eq!(req.method(), Method::GET);
assert_eq!(req.path(), "/index.html");
assert_eq!(req.query_string(), "");
assert_eq!(
req.headers().get("x-test").unwrap().to_str().unwrap(),
"test"
);
assert_eq!(&req.match_info()["test"], "123");
}
}


@ -0,0 +1,136 @@
use actix_codec::{AsyncRead, AsyncWrite};
use actix_http::{body, ws, Error, HttpService, Response};
use actix_http_test::TestServer;
use actix_service::{IntoNewService, NewService};
use actix_utils::framed::FramedTransport;
use bytes::{Bytes, BytesMut};
use futures::future::{self, ok};
use futures::{Future, Sink, Stream};
use actix_framed::{FramedApp, FramedRequest, FramedRoute, SendError, VerifyWebSockets};
fn ws_service<T: AsyncRead + AsyncWrite>(
req: FramedRequest<T>,
) -> impl Future<Item = (), Error = Error> {
let (req, framed, _) = req.into_parts();
let res = ws::handshake(req.head()).unwrap().message_body(());
framed
.send((res, body::BodySize::None).into())
.map_err(|_| panic!())
.and_then(|framed| {
FramedTransport::new(framed.into_framed(ws::Codec::new()), service)
.map_err(|_| panic!())
})
}
fn service(msg: ws::Frame) -> impl Future<Item = ws::Message, Error = Error> {
let msg = match msg {
ws::Frame::Ping(msg) => ws::Message::Pong(msg),
ws::Frame::Text(text) => {
ws::Message::Text(String::from_utf8_lossy(&text.unwrap()).to_string())
}
ws::Frame::Binary(bin) => ws::Message::Binary(bin.unwrap().freeze()),
ws::Frame::Close(reason) => ws::Message::Close(reason),
_ => panic!(),
};
ok(msg)
}
#[test]
fn test_simple() {
let mut srv = TestServer::new(|| {
HttpService::build()
.upgrade(
FramedApp::new().service(FramedRoute::get("/index.html").to(ws_service)),
)
.finish(|_| future::ok::<_, Error>(Response::NotFound()))
});
assert!(srv.ws_at("/test").is_err());
// client service
let framed = srv.ws_at("/index.html").unwrap();
let framed = srv
.block_on(framed.send(ws::Message::Text("text".to_string())))
.unwrap();
let (item, framed) = srv.block_on(framed.into_future()).map_err(|_| ()).unwrap();
assert_eq!(item, Some(ws::Frame::Text(Some(BytesMut::from("text")))));
let framed = srv
.block_on(framed.send(ws::Message::Binary("text".into())))
.unwrap();
let (item, framed) = srv.block_on(framed.into_future()).map_err(|_| ()).unwrap();
assert_eq!(
item,
Some(ws::Frame::Binary(Some(Bytes::from_static(b"text").into())))
);
let framed = srv
.block_on(framed.send(ws::Message::Ping("text".into())))
.unwrap();
let (item, framed) = srv.block_on(framed.into_future()).map_err(|_| ()).unwrap();
assert_eq!(item, Some(ws::Frame::Pong("text".to_string().into())));
let framed = srv
.block_on(framed.send(ws::Message::Close(Some(ws::CloseCode::Normal.into()))))
.unwrap();
let (item, _framed) = srv.block_on(framed.into_future()).map_err(|_| ()).unwrap();
assert_eq!(
item,
Some(ws::Frame::Close(Some(ws::CloseCode::Normal.into())))
);
}
#[test]
fn test_service() {
let mut srv = TestServer::new(|| {
actix_http::h1::OneRequest::new().map_err(|_| ()).and_then(
VerifyWebSockets::default()
.then(SendError::default())
.map_err(|_| ())
.and_then(
FramedApp::new()
.service(FramedRoute::get("/index.html").to(ws_service))
.into_new_service()
.map_err(|_| ()),
),
)
});
assert!(srv.ws_at("/test").is_err());
// client service
let framed = srv.ws_at("/index.html").unwrap();
let framed = srv
.block_on(framed.send(ws::Message::Text("text".to_string())))
.unwrap();
let (item, framed) = srv.block_on(framed.into_future()).map_err(|_| ()).unwrap();
assert_eq!(item, Some(ws::Frame::Text(Some(BytesMut::from("text")))));
let framed = srv
.block_on(framed.send(ws::Message::Binary("text".into())))
.unwrap();
let (item, framed) = srv.block_on(framed.into_future()).map_err(|_| ()).unwrap();
assert_eq!(
item,
Some(ws::Frame::Binary(Some(Bytes::from_static(b"text").into())))
);
let framed = srv
.block_on(framed.send(ws::Message::Ping("text".into())))
.unwrap();
let (item, framed) = srv.block_on(framed.into_future()).map_err(|_| ()).unwrap();
assert_eq!(item, Some(ws::Frame::Pong("text".to_string().into())));
let framed = srv
.block_on(framed.send(ws::Message::Close(Some(ws::CloseCode::Normal.into()))))
.unwrap();
let (item, _framed) = srv.block_on(framed.into_future()).map_err(|_| ()).unwrap();
assert_eq!(
item,
Some(ws::Frame::Close(Some(ws::CloseCode::Normal.into())))
);
}

actix-http/.appveyor.yml Normal file

@ -0,0 +1,41 @@
environment:
global:
PROJECT_NAME: actix-http
matrix:
# Stable channel
- TARGET: i686-pc-windows-msvc
CHANNEL: stable
- TARGET: x86_64-pc-windows-gnu
CHANNEL: stable
- TARGET: x86_64-pc-windows-msvc
CHANNEL: stable
# Nightly channel
- TARGET: i686-pc-windows-msvc
CHANNEL: nightly
- TARGET: x86_64-pc-windows-gnu
CHANNEL: nightly
- TARGET: x86_64-pc-windows-msvc
CHANNEL: nightly
# Install Rust and Cargo
# (Based on https://github.com/rust-lang/libc/blob/master/appveyor.yml)
install:
- ps: >-
If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
$Env:PATH += ';C:\msys64\mingw64\bin'
} ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
$Env:PATH += ';C:\MinGW\bin'
}
- curl -sSf -o rustup-init.exe https://win.rustup.rs
- rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
- rustc -Vv
- cargo -V
# 'cargo test' takes care of building for us, so disable Appveyor's build stage.
build: false
# Equivalent to Travis' `script` phase
test_script:
- cargo clean
- cargo test

actix-http/CHANGES.md Normal file

@ -0,0 +1,80 @@
# Changes
## [0.1.0] - 2019-04-16
### Added
* Expose peer addr via `Request::peer_addr()` and `RequestHead::peer_addr`
### Changed
* `actix_http::encoding` always available
* Use trust-dns-resolver 0.11.0
## [0.1.0-alpha.5] - 2019-04-12
### Added
* Allow to use custom service for upgrade requests
* Added `h1::SendResponse` future.
### Changed
* MessageBody::length() renamed to MessageBody::size() for consistency
* ws handshake verification functions take RequestHead instead of Request
## [0.1.0-alpha.4] - 2019-04-08
### Added
* Allow to use custom `Expect` handler
* Add minimal `std::error::Error` impl for `Error`
### Changed
* Export IntoHeaderValue
* Render error and return as response body
* Use thread pool for response body compression
### Deleted
* Removed PayloadBuffer
## [0.1.0-alpha.3] - 2019-04-02
### Added
* Warn when an unsealed private cookie isn't valid UTF-8
### Fixed
* Rust 1.31.0 compatibility
* Preallocate read buffer for h1 codec
* Detect socket disconnection during protocol selection
## [0.1.0-alpha.2] - 2019-03-29
### Added
* Added ws::Message::Nop, no-op websockets message
### Changed
* Do not use thread pool for decompression if chunk size is smaller than 2048.
## [0.1.0-alpha.1] - 2019-03-28
* Initial impl
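As a rough illustration of the `peer_addr()` addition noted under 0.1.0, a hedged sketch of a handler that reads it (the return type is assumed to be `Option<std::net::SocketAddr>`; a handler like this could be passed to `HttpService::build().finish(...)` as in the example files further down):

```rust
use actix_http::{Request, Response};
use futures::future::{self, FutureResult};

// Hedged sketch; `peer_addr()` is assumed to return Option<std::net::SocketAddr>.
fn greet(req: Request) -> FutureResult<Response, ()> {
    let peer = req
        .peer_addr()
        .map(|addr| addr.to_string())
        .unwrap_or_else(|| "unknown".to_string());
    future::ok(Response::Ok().body(format!("hello {}", peer)))
}
```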


@ -0,0 +1,46 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

actix-http/Cargo.toml Normal file

@ -0,0 +1,105 @@
[package]
name = "actix-http"
version = "0.1.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix http primitives"
readme = "README.md"
keywords = ["actix", "http", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-http/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
license = "MIT/Apache-2.0"
edition = "2018"
workspace = ".."
[package.metadata.docs.rs]
features = ["ssl", "fail", "brotli", "flate2-zlib", "secure-cookies"]
[lib]
name = "actix_http"
path = "src/lib.rs"
[features]
default = []
# openssl
ssl = ["openssl", "actix-connect/ssl"]
# brotli encoding, requires c compiler
brotli = ["brotli2"]
# miniz-sys backend for flate2 crate
flate2-zlib = ["flate2/miniz-sys"]
# rust backend for flate2 crate
flate2-rust = ["flate2/rust_backend"]
# failure integration. actix does not use failure anymore
fail = ["failure"]
# support for secure cookies
secure-cookies = ["ring"]
[dependencies]
actix-service = "0.3.6"
actix-codec = "0.1.2"
actix-connect = "0.1.4"
actix-utils = "0.3.5"
actix-server-config = "0.1.1"
actix-threadpool = "0.1.0"
base64 = "0.10"
bitflags = "1.0"
bytes = "0.4"
byteorder = "1.2"
copyless = "0.1.2"
derive_more = "0.14"
either = "1.5.2"
encoding = "0.2"
futures = "0.1"
hashbrown = "0.2.2"
h2 = "0.1.16"
http = "0.1.17"
httparse = "1.3"
indexmap = "1.0"
lazy_static = "1.0"
language-tags = "0.2"
log = "0.4"
mime = "0.3"
percent-encoding = "1.0"
rand = "0.6"
regex = "1.0"
serde = "1.0"
serde_json = "1.0"
sha1 = "0.6"
slab = "0.4"
serde_urlencoded = "0.5.5"
time = "0.1"
tokio-tcp = "0.1.3"
tokio-timer = "0.2.8"
tokio-current-thread = "0.1"
trust-dns-resolver = { version="0.11.0", default-features = false }
# for secure cookie
ring = { version = "0.14.6", optional = true }
# compression
brotli2 = { version="0.3.2", optional = true }
flate2 = { version="1.0.7", optional = true, default-features = false }
# optional deps
failure = { version = "0.1.5", optional = true }
openssl = { version="0.10", optional = true }
[dev-dependencies]
actix-rt = "0.2.2"
actix-server = { version = "0.4.3", features=["ssl"] }
actix-connect = { version = "0.1.4", features=["ssl"] }
actix-http-test = { version = "0.1.0-alpha.3", features=["ssl"] }
env_logger = "0.6"
serde_derive = "1.0"
openssl = { version="0.10" }
tokio-tcp = "0.1"
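The optional features above are consumed with `cfg` gates in the crate source; a minimal sketch of the pattern, mirroring the `ssl` gate that appears in `src/client/connector.rs` later in this diff:

```rust
// Hedged sketch of the feature-gating pattern the [features] section drives.
#[cfg(feature = "ssl")]
use openssl::ssl::SslConnector;

#[cfg(not(feature = "ssl"))]
type SslConnector = ();
```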

actix-http/LICENSE-APACHE Normal file

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-NOW Nikolay Kim
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

actix-http/LICENSE-MIT Normal file

@ -0,0 +1,25 @@
Copyright (c) 2017 Nikolay Kim
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

actix-http/README.md Normal file

@ -0,0 +1,46 @@
# Actix http [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-http)](https://crates.io/crates/actix-http) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
Actix http
## Documentation & community resources
* [User Guide](https://actix.rs/docs/)
* [API Documentation](https://docs.rs/actix-http/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-http](https://crates.io/crates/actix-http)
* Minimum supported Rust version: 1.31 or later
## Example
```rust
// see examples/framed_hello.rs for complete list of used crates.
extern crate actix_http;
use actix_http::{h1, Response, ServiceConfig};
fn main() {
Server::new().bind("framed_hello", "127.0.0.1:8080", || {
IntoFramed::new(|| h1::Codec::new(ServiceConfig::default())) // <- create h1 codec
.and_then(TakeItem::new().map_err(|_| ())) // <- read one request
.and_then(|(_req, _framed): (_, Framed<_, _>)| { // <- send response and close conn
SendResponse::send(_framed, Response::Ok().body("Hello world!"))
.map_err(|_| ())
.map(|_| ())
})
}).unwrap().run();
}
```
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option.
## Code of Conduct
Contribution to the actix-http crate is organized under the terms of the
Contributor Covenant; the maintainer of actix-http, @fafhrd91, promises to
intervene to uphold that code of conduct.


@ -0,0 +1,37 @@
use std::{env, io};
use actix_http::{error::PayloadError, HttpService, Request, Response};
use actix_server::Server;
use bytes::BytesMut;
use futures::{Future, Stream};
use http::header::HeaderValue;
use log::info;
fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "echo=info");
env_logger::init();
Server::build()
.bind("echo", "127.0.0.1:8080", || {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
.finish(|mut req: Request| {
req.take_payload()
.fold(BytesMut::new(), move |mut body, chunk| {
body.extend_from_slice(&chunk);
Ok::<_, PayloadError>(body)
})
.and_then(|bytes| {
info!("request body: {:?}", bytes);
let mut res = Response::Ok();
res.header(
"x-head",
HeaderValue::from_static("dummy value!"),
);
Ok(res.body(bytes))
})
})
})?
.run()
}


@ -0,0 +1,34 @@
use std::{env, io};
use actix_http::http::HeaderValue;
use actix_http::{error::PayloadError, Error, HttpService, Request, Response};
use actix_server::Server;
use bytes::BytesMut;
use futures::{Future, Stream};
use log::info;
fn handle_request(mut req: Request) -> impl Future<Item = Response, Error = Error> {
req.take_payload()
.fold(BytesMut::new(), move |mut body, chunk| {
body.extend_from_slice(&chunk);
Ok::<_, PayloadError>(body)
})
.from_err()
.and_then(|bytes| {
info!("request body: {:?}", bytes);
let mut res = Response::Ok();
res.header("x-head", HeaderValue::from_static("dummy value!"));
Ok(res.body(bytes))
})
}
fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "echo=info");
env_logger::init();
Server::build()
.bind("echo", "127.0.0.1:8080", || {
HttpService::build().finish(|_req: Request| handle_request(_req))
})?
.run()
}


@ -0,0 +1,26 @@
use std::{env, io};
use actix_http::{HttpService, Response};
use actix_server::Server;
use futures::future;
use http::header::HeaderValue;
use log::info;
fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "hello_world=info");
env_logger::init();
Server::build()
.bind("hello-world", "127.0.0.1:8080", || {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
.finish(|_req| {
info!("{:?}", _req);
let mut res = Response::Ok();
res.header("x-head", HeaderValue::from_static("dummy value!"));
future::ok::<_, ()>(res.body("Hello world!"))
})
})?
.run()
}

actix-http/rustfmt.toml Normal file

@ -0,0 +1,5 @@
max_width = 89
reorder_imports = true
#wrap_comments = true
#fn_args_density = "Compressed"
#use_small_heuristics = false

actix-http/src/body.rs Normal file

@ -0,0 +1,532 @@
use std::marker::PhantomData;
use std::{fmt, mem};
use bytes::{Bytes, BytesMut};
use futures::{Async, Poll, Stream};
use crate::error::Error;
#[derive(Debug, PartialEq, Copy, Clone)]
/// Body size hint
pub enum BodySize {
None,
Empty,
Sized(usize),
Sized64(u64),
Stream,
}
impl BodySize {
pub fn is_eof(&self) -> bool {
match self {
BodySize::None
| BodySize::Empty
| BodySize::Sized(0)
| BodySize::Sized64(0) => true,
_ => false,
}
}
}
/// A type that implements this trait can be streamed to a peer as a message body.
pub trait MessageBody {
fn size(&self) -> BodySize;
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error>;
}
impl MessageBody for () {
fn size(&self) -> BodySize {
BodySize::Empty
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
Ok(Async::Ready(None))
}
}
impl<T: MessageBody> MessageBody for Box<T> {
fn size(&self) -> BodySize {
self.as_ref().size()
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
self.as_mut().poll_next()
}
}
pub enum ResponseBody<B> {
Body(B),
Other(Body),
}
impl ResponseBody<Body> {
pub fn into_body<B>(self) -> ResponseBody<B> {
match self {
ResponseBody::Body(b) => ResponseBody::Other(b),
ResponseBody::Other(b) => ResponseBody::Other(b),
}
}
}
impl<B> ResponseBody<B> {
pub fn take_body(&mut self) -> ResponseBody<B> {
std::mem::replace(self, ResponseBody::Other(Body::None))
}
}
impl<B: MessageBody> ResponseBody<B> {
pub fn as_ref(&self) -> Option<&B> {
if let ResponseBody::Body(ref b) = self {
Some(b)
} else {
None
}
}
}
impl<B: MessageBody> MessageBody for ResponseBody<B> {
fn size(&self) -> BodySize {
match self {
ResponseBody::Body(ref body) => body.size(),
ResponseBody::Other(ref body) => body.size(),
}
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
match self {
ResponseBody::Body(ref mut body) => body.poll_next(),
ResponseBody::Other(ref mut body) => body.poll_next(),
}
}
}
impl<B: MessageBody> Stream for ResponseBody<B> {
type Item = Bytes;
type Error = Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.poll_next()
}
}
/// Represents various types of http message body.
pub enum Body {
/// Empty response. `Content-Length` header is not set.
None,
/// Zero sized response body. `Content-Length` header is set to `0`.
Empty,
/// Specific response body.
Bytes(Bytes),
/// Generic message body.
Message(Box<dyn MessageBody>),
}
impl Body {
/// Create body from slice (copy)
pub fn from_slice(s: &[u8]) -> Body {
Body::Bytes(Bytes::from(s))
}
/// Create body from generic message body.
pub fn from_message<B: MessageBody + 'static>(body: B) -> Body {
Body::Message(Box::new(body))
}
}
impl MessageBody for Body {
fn size(&self) -> BodySize {
match self {
Body::None => BodySize::None,
Body::Empty => BodySize::Empty,
Body::Bytes(ref bin) => BodySize::Sized(bin.len()),
Body::Message(ref body) => body.size(),
}
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
match self {
Body::None => Ok(Async::Ready(None)),
Body::Empty => Ok(Async::Ready(None)),
Body::Bytes(ref mut bin) => {
let len = bin.len();
if len == 0 {
Ok(Async::Ready(None))
} else {
Ok(Async::Ready(Some(mem::replace(bin, Bytes::new()))))
}
}
Body::Message(ref mut body) => body.poll_next(),
}
}
}
impl PartialEq for Body {
fn eq(&self, other: &Body) -> bool {
match *self {
Body::None => match *other {
Body::None => true,
_ => false,
},
Body::Empty => match *other {
Body::Empty => true,
_ => false,
},
Body::Bytes(ref b) => match *other {
Body::Bytes(ref b2) => b == b2,
_ => false,
},
Body::Message(_) => false,
}
}
}
impl fmt::Debug for Body {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Body::None => write!(f, "Body::None"),
Body::Empty => write!(f, "Body::Empty"),
Body::Bytes(ref b) => write!(f, "Body::Bytes({:?})", b),
Body::Message(_) => write!(f, "Body::Message(_)"),
}
}
}
impl From<&'static str> for Body {
fn from(s: &'static str) -> Body {
Body::Bytes(Bytes::from_static(s.as_ref()))
}
}
impl From<&'static [u8]> for Body {
fn from(s: &'static [u8]) -> Body {
Body::Bytes(Bytes::from_static(s))
}
}
impl From<Vec<u8>> for Body {
fn from(vec: Vec<u8>) -> Body {
Body::Bytes(Bytes::from(vec))
}
}
impl From<String> for Body {
fn from(s: String) -> Body {
s.into_bytes().into()
}
}
impl<'a> From<&'a String> for Body {
fn from(s: &'a String) -> Body {
Body::Bytes(Bytes::from(AsRef::<[u8]>::as_ref(&s)))
}
}
impl From<Bytes> for Body {
fn from(s: Bytes) -> Body {
Body::Bytes(s)
}
}
impl From<BytesMut> for Body {
fn from(s: BytesMut) -> Body {
Body::Bytes(s.freeze())
}
}
impl MessageBody for Bytes {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
if self.is_empty() {
Ok(Async::Ready(None))
} else {
Ok(Async::Ready(Some(mem::replace(self, Bytes::new()))))
}
}
}
impl MessageBody for BytesMut {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
if self.is_empty() {
Ok(Async::Ready(None))
} else {
Ok(Async::Ready(Some(
mem::replace(self, BytesMut::new()).freeze(),
)))
}
}
}
impl MessageBody for &'static str {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
if self.is_empty() {
Ok(Async::Ready(None))
} else {
Ok(Async::Ready(Some(Bytes::from_static(
mem::replace(self, "").as_ref(),
))))
}
}
}
impl MessageBody for &'static [u8] {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
if self.is_empty() {
Ok(Async::Ready(None))
} else {
Ok(Async::Ready(Some(Bytes::from_static(mem::replace(
self, b"",
)))))
}
}
}
impl MessageBody for Vec<u8> {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
if self.is_empty() {
Ok(Async::Ready(None))
} else {
Ok(Async::Ready(Some(Bytes::from(mem::replace(
self,
Vec::new(),
)))))
}
}
}
impl MessageBody for String {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
if self.is_empty() {
Ok(Async::Ready(None))
} else {
Ok(Async::Ready(Some(Bytes::from(
mem::replace(self, String::new()).into_bytes(),
))))
}
}
}
/// Type representing a streaming body.
/// The response does not contain a `Content-Length` header; an appropriate transfer encoding is used instead.
pub struct BodyStream<S, E> {
stream: S,
_t: PhantomData<E>,
}
impl<S, E> BodyStream<S, E>
where
S: Stream<Item = Bytes, Error = E>,
E: Into<Error>,
{
pub fn new(stream: S) -> Self {
BodyStream {
stream,
_t: PhantomData,
}
}
}
impl<S, E> MessageBody for BodyStream<S, E>
where
S: Stream<Item = Bytes, Error = E>,
E: Into<Error>,
{
fn size(&self) -> BodySize {
BodySize::Stream
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
self.stream.poll().map_err(std::convert::Into::into)
}
}
/// Type representing a streaming body of known size. This body implementation should be used
/// when the total size of the stream is known. Data is sent as-is without using transfer encoding.
pub struct SizedStream<S> {
size: usize,
stream: S,
}
impl<S> SizedStream<S>
where
S: Stream<Item = Bytes, Error = Error>,
{
pub fn new(size: usize, stream: S) -> Self {
SizedStream { size, stream }
}
}
impl<S> MessageBody for SizedStream<S>
where
S: Stream<Item = Bytes, Error = Error>,
{
fn size(&self) -> BodySize {
BodySize::Sized(self.size)
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
self.stream.poll()
}
}
#[cfg(test)]
mod tests {
use super::*;
impl Body {
pub(crate) fn get_ref(&self) -> &[u8] {
match *self {
Body::Bytes(ref bin) => &bin,
_ => panic!(),
}
}
}
impl ResponseBody<Body> {
pub(crate) fn get_ref(&self) -> &[u8] {
match *self {
ResponseBody::Body(ref b) => b.get_ref(),
ResponseBody::Other(ref b) => b.get_ref(),
}
}
}
#[test]
fn test_static_str() {
assert_eq!(Body::from("").size(), BodySize::Sized(0));
assert_eq!(Body::from("test").size(), BodySize::Sized(4));
assert_eq!(Body::from("test").get_ref(), b"test");
assert_eq!("test".size(), BodySize::Sized(4));
assert_eq!(
"test".poll_next().unwrap(),
Async::Ready(Some(Bytes::from("test")))
);
}
#[test]
fn test_static_bytes() {
assert_eq!(Body::from(b"test".as_ref()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b"test".as_ref()).get_ref(), b"test");
assert_eq!(
Body::from_slice(b"test".as_ref()).size(),
BodySize::Sized(4)
);
assert_eq!(Body::from_slice(b"test".as_ref()).get_ref(), b"test");
assert_eq!((&b"test"[..]).size(), BodySize::Sized(4));
assert_eq!(
(&b"test"[..]).poll_next().unwrap(),
Async::Ready(Some(Bytes::from("test")))
);
}
#[test]
fn test_vec() {
assert_eq!(Body::from(Vec::from("test")).size(), BodySize::Sized(4));
assert_eq!(Body::from(Vec::from("test")).get_ref(), b"test");
assert_eq!(Vec::from("test").size(), BodySize::Sized(4));
assert_eq!(
Vec::from("test").poll_next().unwrap(),
Async::Ready(Some(Bytes::from("test")))
);
}
#[test]
fn test_bytes() {
let mut b = Bytes::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
b.poll_next().unwrap(),
Async::Ready(Some(Bytes::from("test")))
);
}
#[test]
fn test_bytes_mut() {
let mut b = BytesMut::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
b.poll_next().unwrap(),
Async::Ready(Some(Bytes::from("test")))
);
}
#[test]
fn test_string() {
let mut b = "test".to_owned();
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
assert_eq!(Body::from(&b).size(), BodySize::Sized(4));
assert_eq!(Body::from(&b).get_ref(), b"test");
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
b.poll_next().unwrap(),
Async::Ready(Some(Bytes::from("test")))
);
}
#[test]
fn test_unit() {
assert_eq!(().size(), BodySize::Empty);
assert_eq!(().poll_next().unwrap(), Async::Ready(None));
}
#[test]
fn test_box() {
let mut val = Box::new(());
assert_eq!(val.size(), BodySize::Empty);
assert_eq!(val.poll_next().unwrap(), Async::Ready(None));
}
#[test]
fn test_body_eq() {
assert!(Body::None == Body::None);
assert!(Body::None != Body::Empty);
assert!(Body::Empty == Body::Empty);
assert!(Body::Empty != Body::None);
assert!(
Body::Bytes(Bytes::from_static(b"1"))
== Body::Bytes(Bytes::from_static(b"1"))
);
assert!(Body::Bytes(Bytes::from_static(b"1")) != Body::None);
}
#[test]
fn test_body_debug() {
assert!(format!("{:?}", Body::None).contains("Body::None"));
assert!(format!("{:?}", Body::Empty).contains("Body::Empty"));
assert!(format!("{:?}", Body::Bytes(Bytes::from_static(b"1"))).contains("1"));
}
}
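A hedged sketch of implementing the `MessageBody` trait defined above for a custom one-shot body. The `OneShot` type is hypothetical and is written as if it lived in this module, so it uses `MessageBody`, `BodySize`, and `Error` directly:

```rust
use bytes::Bytes;
use futures::{Async, Poll};

// Hypothetical one-shot body: yields its chunk on the first poll, then ends the stream.
struct OneShot(Option<Bytes>);

impl MessageBody for OneShot {
    fn size(&self) -> BodySize {
        match self.0 {
            Some(ref b) => BodySize::Sized(b.len()),
            None => BodySize::Empty,
        }
    }

    fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
        // take() returns Some(chunk) once and None afterwards.
        Ok(Async::Ready(self.0.take()))
    }
}
```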

actix-http/src/builder.rs Normal file

@ -0,0 +1,195 @@
use std::fmt;
use std::marker::PhantomData;
use actix_codec::Framed;
use actix_server_config::ServerConfig as SrvConfig;
use actix_service::{IntoNewService, NewService, Service};
use crate::body::MessageBody;
use crate::config::{KeepAlive, ServiceConfig};
use crate::error::Error;
use crate::h1::{Codec, ExpectHandler, H1Service, UpgradeHandler};
use crate::h2::H2Service;
use crate::request::Request;
use crate::response::Response;
use crate::service::HttpService;
/// An HTTP service builder
///
/// This type can be used to construct an instance of `http service` through a
/// builder-like pattern.
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler<T>> {
keep_alive: KeepAlive,
client_timeout: u64,
client_disconnect: u64,
expect: X,
upgrade: Option<U>,
_t: PhantomData<(T, S)>,
}
impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler<T>>
where
S: NewService<SrvConfig, Request = Request>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
{
/// Create instance of `HttpServiceBuilder`
pub fn new() -> Self {
HttpServiceBuilder {
keep_alive: KeepAlive::Timeout(5),
client_timeout: 5000,
client_disconnect: 0,
expect: ExpectHandler,
upgrade: None,
_t: PhantomData,
}
}
}
impl<T, S, X, U> HttpServiceBuilder<T, S, X, U>
where
S: NewService<SrvConfig, Request = Request>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
X: NewService<Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: NewService<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
/// Set server keep-alive setting.
///
/// By default keep-alive is set to 5 seconds.
pub fn keep_alive<W: Into<KeepAlive>>(mut self, val: W) -> Self {
self.keep_alive = val.into();
self
}
/// Set server client timeout in milliseconds for first request.
///
/// Defines a timeout for reading client request header. If a client does not transmit
/// the entire set of headers within this time, the request is terminated with
/// the 408 (Request Time-out) error.
///
/// To disable timeout set value to 0.
///
/// By default client timeout is set to 5000 milliseconds.
pub fn client_timeout(mut self, val: u64) -> Self {
self.client_timeout = val;
self
}
/// Set server connection disconnect timeout in milliseconds.
///
/// Defines a timeout for the connection disconnect procedure. If the procedure does not complete
/// within this time, the request gets dropped. This timeout affects secure connections.
///
/// To disable timeout set value to 0.
///
/// By default disconnect timeout is set to 0.
pub fn client_disconnect(mut self, val: u64) -> Self {
self.client_disconnect = val;
self
}
/// Provide service for `EXPECT: 100-Continue` support.
///
/// The service gets called with requests that contain an `EXPECT` header.
/// On success the service must return the request, which is then
/// forwarded to the main service.
pub fn expect<F, X1>(self, expect: F) -> HttpServiceBuilder<T, S, X1, U>
where
F: IntoNewService<X1>,
X1: NewService<Request = Request, Response = Request>,
X1::Error: Into<Error>,
X1::InitError: fmt::Debug,
{
HttpServiceBuilder {
keep_alive: self.keep_alive,
client_timeout: self.client_timeout,
client_disconnect: self.client_disconnect,
expect: expect.into_new_service(),
upgrade: self.upgrade,
_t: PhantomData,
}
}
/// Provide service for custom `Connection: UPGRADE` support.
///
/// If a service is provided, normal request handling is halted
/// and this service gets called with the original request and framed object.
pub fn upgrade<F, U1>(self, upgrade: F) -> HttpServiceBuilder<T, S, X, U1>
where
F: IntoNewService<U1>,
U1: NewService<Request = (Request, Framed<T, Codec>), Response = ()>,
U1::Error: fmt::Display,
U1::InitError: fmt::Debug,
{
HttpServiceBuilder {
keep_alive: self.keep_alive,
client_timeout: self.client_timeout,
client_disconnect: self.client_disconnect,
expect: self.expect,
upgrade: Some(upgrade.into_new_service()),
_t: PhantomData,
}
}
/// Finish service configuration and create *http service* for HTTP/1 protocol.
pub fn h1<F, P, B>(self, service: F) -> H1Service<T, P, S, B, X, U>
where
B: MessageBody + 'static,
F: IntoNewService<S, SrvConfig>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_timeout,
self.client_disconnect,
);
H1Service::with_config(cfg, service.into_new_service())
.expect(self.expect)
.upgrade(self.upgrade)
}
/// Finish service configuration and create *http service* for HTTP/2 protocol.
pub fn h2<F, P, B>(self, service: F) -> H2Service<T, P, S, B>
where
B: MessageBody + 'static,
F: IntoNewService<S, SrvConfig>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
<S::Service as Service>::Future: 'static,
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_timeout,
self.client_disconnect,
);
H2Service::with_config(cfg, service.into_new_service())
}
/// Finish service configuration and create `HttpService` instance.
pub fn finish<F, P, B>(self, service: F) -> HttpService<T, P, S, B, X, U>
where
B: MessageBody + 'static,
F: IntoNewService<S, SrvConfig>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
<S::Service as Service>::Future: 'static,
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_timeout,
self.client_disconnect,
);
HttpService::with_config(cfg, service.into_new_service())
.expect(self.expect)
.upgrade(self.upgrade)
}
}
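A hedged usage sketch for the builder above, reusing the `Server::build()` pattern from the hello-world example earlier in this diff (`KeepAlive` is assumed to be re-exported at the crate root, as `ServiceConfig` is):

```rust
use std::io;

use actix_http::{HttpService, KeepAlive, Response};
use actix_server::Server;
use futures::future;

// Hedged sketch: wiring keep-alive and the client timeouts documented above.
fn main() -> io::Result<()> {
    Server::build()
        .bind("builder-demo", "127.0.0.1:8080", || {
            HttpService::build()
                .keep_alive(KeepAlive::Timeout(30)) // seconds
                .client_timeout(5_000)              // milliseconds
                .client_disconnect(1_000)           // milliseconds
                .finish(|_req| {
                    future::ok::<_, ()>(Response::Ok().body("Hello from the builder!"))
                })
        })?
        .run()
}
```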


@ -0,0 +1,286 @@
use std::{fmt, io, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use bytes::{Buf, Bytes};
use futures::future::{err, Either, Future, FutureResult};
use futures::Poll;
use h2::client::SendRequest;
use crate::body::MessageBody;
use crate::h1::ClientCodec;
use crate::message::{RequestHead, ResponseHead};
use crate::payload::Payload;
use super::error::SendRequestError;
use super::pool::{Acquired, Protocol};
use super::{h1proto, h2proto};
pub(crate) enum ConnectionType<Io> {
H1(Io),
H2(SendRequest<Bytes>),
}
pub trait Connection {
type Io: AsyncRead + AsyncWrite;
type Future: Future<Item = (ResponseHead, Payload), Error = SendRequestError>;
fn protocol(&self) -> Protocol;
/// Send request and body
fn send_request<B: MessageBody + 'static>(
self,
head: RequestHead,
body: B,
) -> Self::Future;
type TunnelFuture: Future<
Item = (ResponseHead, Framed<Self::Io, ClientCodec>),
Error = SendRequestError,
>;
/// Send request, returns Response and Framed
fn open_tunnel(self, head: RequestHead) -> Self::TunnelFuture;
}
pub(crate) trait ConnectionLifetime: AsyncRead + AsyncWrite + 'static {
/// Close connection
fn close(&mut self);
/// Release connection to the connection pool
fn release(&mut self);
}
#[doc(hidden)]
/// HTTP client connection
pub struct IoConnection<T> {
io: Option<ConnectionType<T>>,
created: time::Instant,
pool: Option<Acquired<T>>,
}
impl<T> fmt::Debug for IoConnection<T>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self.io {
Some(ConnectionType::H1(ref io)) => write!(f, "H1Connection({:?})", io),
Some(ConnectionType::H2(_)) => write!(f, "H2Connection"),
None => write!(f, "Connection(Empty)"),
}
}
}
impl<T: AsyncRead + AsyncWrite> IoConnection<T> {
pub(crate) fn new(
io: ConnectionType<T>,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> Self {
IoConnection {
pool,
created,
io: Some(io),
}
}
pub(crate) fn into_inner(self) -> (ConnectionType<T>, time::Instant) {
(self.io.unwrap(), self.created)
}
}
impl<T> Connection for IoConnection<T>
where
T: AsyncRead + AsyncWrite + 'static,
{
type Io = T;
type Future = Box<Future<Item = (ResponseHead, Payload), Error = SendRequestError>>;
fn protocol(&self) -> Protocol {
match self.io {
Some(ConnectionType::H1(_)) => Protocol::Http1,
Some(ConnectionType::H2(_)) => Protocol::Http2,
None => Protocol::Http1,
}
}
fn send_request<B: MessageBody + 'static>(
mut self,
head: RequestHead,
body: B,
) -> Self::Future {
match self.io.take().unwrap() {
ConnectionType::H1(io) => Box::new(h1proto::send_request(
io,
head,
body,
self.created,
self.pool,
)),
ConnectionType::H2(io) => Box::new(h2proto::send_request(
io,
head,
body,
self.created,
self.pool,
)),
}
}
type TunnelFuture = Either<
Box<
Future<
Item = (ResponseHead, Framed<Self::Io, ClientCodec>),
Error = SendRequestError,
>,
>,
FutureResult<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>;
/// Send request, returns Response and Framed
fn open_tunnel(mut self, head: RequestHead) -> Self::TunnelFuture {
match self.io.take().unwrap() {
ConnectionType::H1(io) => {
Either::A(Box::new(h1proto::open_tunnel(io, head)))
}
ConnectionType::H2(io) => {
if let Some(mut pool) = self.pool.take() {
pool.release(IoConnection::new(
ConnectionType::H2(io),
self.created,
None,
));
}
Either::B(err(SendRequestError::TunnelNotSupported))
}
}
}
}
#[allow(dead_code)]
pub(crate) enum EitherConnection<A, B> {
A(IoConnection<A>),
B(IoConnection<B>),
}
impl<A, B> Connection for EitherConnection<A, B>
where
A: AsyncRead + AsyncWrite + 'static,
B: AsyncRead + AsyncWrite + 'static,
{
type Io = EitherIo<A, B>;
type Future = Box<Future<Item = (ResponseHead, Payload), Error = SendRequestError>>;
fn protocol(&self) -> Protocol {
match self {
EitherConnection::A(con) => con.protocol(),
EitherConnection::B(con) => con.protocol(),
}
}
fn send_request<RB: MessageBody + 'static>(
self,
head: RequestHead,
body: RB,
) -> Self::Future {
match self {
EitherConnection::A(con) => con.send_request(head, body),
EitherConnection::B(con) => con.send_request(head, body),
}
}
type TunnelFuture = Box<
Future<
Item = (ResponseHead, Framed<Self::Io, ClientCodec>),
Error = SendRequestError,
>,
>;
/// Send request, returns Response and Framed
fn open_tunnel(self, head: RequestHead) -> Self::TunnelFuture {
match self {
EitherConnection::A(con) => Box::new(
con.open_tunnel(head)
.map(|(head, framed)| (head, framed.map_io(EitherIo::A))),
),
EitherConnection::B(con) => Box::new(
con.open_tunnel(head)
.map(|(head, framed)| (head, framed.map_io(EitherIo::B))),
),
}
}
}
pub enum EitherIo<A, B> {
A(A),
B(B),
}
impl<A, B> io::Read for EitherIo<A, B>
where
A: io::Read,
B: io::Read,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match self {
EitherIo::A(ref mut val) => val.read(buf),
EitherIo::B(ref mut val) => val.read(buf),
}
}
}
impl<A, B> AsyncRead for EitherIo<A, B>
where
A: AsyncRead,
B: AsyncRead,
{
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
match self {
EitherIo::A(ref val) => val.prepare_uninitialized_buffer(buf),
EitherIo::B(ref val) => val.prepare_uninitialized_buffer(buf),
}
}
}
impl<A, B> io::Write for EitherIo<A, B>
where
A: io::Write,
B: io::Write,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
match self {
EitherIo::A(ref mut val) => val.write(buf),
EitherIo::B(ref mut val) => val.write(buf),
}
}
fn flush(&mut self) -> io::Result<()> {
match self {
EitherIo::A(ref mut val) => val.flush(),
EitherIo::B(ref mut val) => val.flush(),
}
}
}
impl<A, B> AsyncWrite for EitherIo<A, B>
where
A: AsyncWrite,
B: AsyncWrite,
{
fn shutdown(&mut self) -> Poll<(), io::Error> {
match self {
EitherIo::A(ref mut val) => val.shutdown(),
EitherIo::B(ref mut val) => val.shutdown(),
}
}
fn write_buf<U: Buf>(&mut self, buf: &mut U) -> Poll<usize, io::Error>
where
Self: Sized,
{
match self {
EitherIo::A(ref mut val) => val.write_buf(buf),
EitherIo::B(ref mut val) => val.write_buf(buf),
}
}
}


@ -0,0 +1,462 @@
use std::fmt;
use std::marker::PhantomData;
use std::time::Duration;
use actix_codec::{AsyncRead, AsyncWrite};
use actix_connect::{
default_connector, Connect as TcpConnect, Connection as TcpConnection,
};
use actix_service::{apply_fn, Service, ServiceExt};
use actix_utils::timeout::{TimeoutError, TimeoutService};
use http::Uri;
use tokio_tcp::TcpStream;
use super::connection::Connection;
use super::error::ConnectError;
use super::pool::{ConnectionPool, Protocol};
#[cfg(feature = "ssl")]
use openssl::ssl::SslConnector;
#[cfg(not(feature = "ssl"))]
type SslConnector = ();
/// Manages HTTP client network connectivity.
/// The `Connector` type uses a builder-like combinator pattern for service
/// construction that finishes by calling the `.finish()` method.
///
/// ```rust,ignore
/// use std::time::Duration;
/// use actix_http::client::Connector;
///
/// let connector = Connector::new()
/// .timeout(Duration::from_secs(5))
/// .finish();
/// ```
pub struct Connector<T, U> {
connector: T,
timeout: Duration,
conn_lifetime: Duration,
conn_keep_alive: Duration,
disconnect_timeout: Duration,
limit: usize,
#[allow(dead_code)]
ssl: SslConnector,
_t: PhantomData<U>,
}
impl Connector<(), ()> {
pub fn new() -> Connector<
impl Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, TcpStream>,
Error = actix_connect::ConnectError,
> + Clone,
TcpStream,
> {
let ssl = {
#[cfg(feature = "ssl")]
{
use openssl::ssl::SslMethod;
let mut ssl = SslConnector::builder(SslMethod::tls()).unwrap();
let _ = ssl
.set_alpn_protos(b"\x02h2\x08http/1.1")
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
ssl.build()
}
#[cfg(not(feature = "ssl"))]
{}
};
Connector {
ssl,
connector: default_connector(),
timeout: Duration::from_secs(1),
conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15),
disconnect_timeout: Duration::from_millis(3000),
limit: 100,
_t: PhantomData,
}
}
}
impl<T, U> Connector<T, U> {
/// Use custom connector.
pub fn connector<T1, U1>(self, connector: T1) -> Connector<T1, U1>
where
U1: AsyncRead + AsyncWrite + fmt::Debug,
T1: Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, U1>,
Error = actix_connect::ConnectError,
> + Clone,
{
Connector {
connector,
timeout: self.timeout,
conn_lifetime: self.conn_lifetime,
conn_keep_alive: self.conn_keep_alive,
disconnect_timeout: self.disconnect_timeout,
limit: self.limit,
ssl: self.ssl,
_t: PhantomData,
}
}
}
impl<T, U> Connector<T, U>
where
U: AsyncRead + AsyncWrite + fmt::Debug + 'static,
T: Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, U>,
Error = actix_connect::ConnectError,
> + Clone,
{
/// Connection timeout, i.e. max time to connect to remote host including DNS name resolution.
/// Set to 1 second by default.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.timeout = timeout;
self
}
#[cfg(feature = "ssl")]
/// Use custom `SslConnector` instance.
pub fn ssl(mut self, connector: SslConnector) -> Self {
self.ssl = connector;
self
}
/// Set total number of simultaneous connections per type of scheme.
///
/// If limit is 0, the connector has no limit.
/// The default limit size is 100.
pub fn limit(mut self, limit: usize) -> Self {
self.limit = limit;
self
}
/// Set keep-alive period for opened connection.
///
/// Keep-alive period is the period between connection usage. If
/// the delay between repeated usages of the same connection
/// exceeds this period, the connection is closed.
/// Default keep-alive period is 15 seconds.
pub fn conn_keep_alive(mut self, dur: Duration) -> Self {
self.conn_keep_alive = dur;
self
}
/// Set max lifetime period for connection.
///
/// Connection lifetime is max lifetime of any opened connection
/// until it is closed regardless of keep-alive period.
/// Default lifetime period is 75 seconds.
pub fn conn_lifetime(mut self, dur: Duration) -> Self {
self.conn_lifetime = dur;
self
}
/// Set server connection disconnect timeout in milliseconds.
///
/// Defines a timeout for the connection disconnect procedure. If the procedure does not complete
/// within this time, the socket gets dropped. This timeout affects only secure connections.
///
/// To disable timeout set value to 0.
///
/// By default disconnect timeout is set to 3000 milliseconds.
pub fn disconnect_timeout(mut self, dur: Duration) -> Self {
self.disconnect_timeout = dur;
self
}
/// Finish configuration process and create connector service.
/// The Connector builder always concludes by calling `finish()` last in
/// its combinator chain.
pub fn finish(
self,
) -> impl Service<Request = Uri, Response = impl Connection, Error = ConnectError> + Clone
{
#[cfg(not(feature = "ssl"))]
{
let connector = TimeoutService::new(
self.timeout,
apply_fn(self.connector, |msg: Uri, srv| srv.call(msg.into()))
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
connector,
self.conn_lifetime,
self.conn_keep_alive,
None,
self.limit,
),
}
}
#[cfg(feature = "ssl")]
{
const H2: &[u8] = b"h2";
use actix_connect::ssl::OpensslConnector;
let ssl_service = TimeoutService::new(
self.timeout,
apply_fn(self.connector.clone(), |msg: Uri, srv| srv.call(msg.into()))
.map_err(ConnectError::from)
.and_then(
OpensslConnector::service(self.ssl)
.map_err(ConnectError::from)
.map(|stream| {
let sock = stream.into_parts().0;
let h2 = sock
.get_ref()
.ssl()
.selected_alpn_protocol()
.map(|protos| protos.windows(2).any(|w| w == H2))
.unwrap_or(false);
if h2 {
(sock, Protocol::Http2)
} else {
(sock, Protocol::Http1)
}
}),
),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
let tcp_service = TimeoutService::new(
self.timeout,
apply_fn(self.connector.clone(), |msg: Uri, srv| srv.call(msg.into()))
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
tcp_service,
self.conn_lifetime,
self.conn_keep_alive,
None,
self.limit,
),
ssl_pool: ConnectionPool::new(
ssl_service,
self.conn_lifetime,
self.conn_keep_alive,
Some(self.disconnect_timeout),
self.limit,
),
}
}
}
#[doc(hidden)]
#[deprecated(since = "0.1.0-alpha4", note = "please use `.finish()` method")]
pub fn service(
self,
) -> impl Service<Request = Uri, Response = impl Connection, Error = ConnectError> + Clone
{
self.finish()
}
}
#[cfg(not(feature = "ssl"))]
mod connect_impl {
use futures::future::{err, Either, FutureResult};
use futures::Poll;
use super::*;
use crate::client::connection::IoConnection;
pub(crate) struct InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Uri, Response = (Io, Protocol), Error = ConnectError>,
{
pub(crate) tcp_pool: ConnectionPool<T, Io>,
}
impl<T, Io> Clone for InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Uri, Response = (Io, Protocol), Error = ConnectError>
+ Clone,
{
fn clone(&self) -> Self {
InnerConnector {
tcp_pool: self.tcp_pool.clone(),
}
}
}
impl<T, Io> Service for InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Uri, Response = (Io, Protocol), Error = ConnectError>,
{
type Request = Uri;
type Response = IoConnection<Io>;
type Error = ConnectError;
type Future = Either<
<ConnectionPool<T, Io> as Service>::Future,
FutureResult<IoConnection<Io>, ConnectError>,
>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.tcp_pool.poll_ready()
}
fn call(&mut self, req: Uri) -> Self::Future {
match req.scheme_str() {
Some("https") | Some("wss") => {
Either::B(err(ConnectError::SslIsNotSupported))
}
_ => Either::A(self.tcp_pool.call(req)),
}
}
}
}
#[cfg(feature = "ssl")]
mod connect_impl {
use std::marker::PhantomData;
use futures::future::{Either, FutureResult};
use futures::{Async, Future, Poll};
use super::*;
use crate::client::connection::EitherConnection;
pub(crate) struct InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + 'static,
Io2: AsyncRead + AsyncWrite + 'static,
T1: Service<Request = Uri, Response = (Io1, Protocol), Error = ConnectError>,
T2: Service<Request = Uri, Response = (Io2, Protocol), Error = ConnectError>,
{
pub(crate) tcp_pool: ConnectionPool<T1, Io1>,
pub(crate) ssl_pool: ConnectionPool<T2, Io2>,
}
impl<T1, T2, Io1, Io2> Clone for InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + 'static,
Io2: AsyncRead + AsyncWrite + 'static,
T1: Service<Request = Uri, Response = (Io1, Protocol), Error = ConnectError>
+ Clone,
T2: Service<Request = Uri, Response = (Io2, Protocol), Error = ConnectError>
+ Clone,
{
fn clone(&self) -> Self {
InnerConnector {
tcp_pool: self.tcp_pool.clone(),
ssl_pool: self.ssl_pool.clone(),
}
}
}
impl<T1, T2, Io1, Io2> Service for InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + 'static,
Io2: AsyncRead + AsyncWrite + 'static,
T1: Service<Request = Uri, Response = (Io1, Protocol), Error = ConnectError>,
T2: Service<Request = Uri, Response = (Io2, Protocol), Error = ConnectError>,
{
type Request = Uri;
type Response = EitherConnection<Io1, Io2>;
type Error = ConnectError;
type Future = Either<
FutureResult<Self::Response, Self::Error>,
Either<
InnerConnectorResponseA<T1, Io1, Io2>,
InnerConnectorResponseB<T2, Io1, Io2>,
>,
>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.tcp_pool.poll_ready()
}
fn call(&mut self, req: Uri) -> Self::Future {
match req.scheme_str() {
Some("https") | Some("wss") => {
Either::B(Either::B(InnerConnectorResponseB {
fut: self.ssl_pool.call(req),
_t: PhantomData,
}))
}
_ => Either::B(Either::A(InnerConnectorResponseA {
fut: self.tcp_pool.call(req),
_t: PhantomData,
})),
}
}
}
pub(crate) struct InnerConnectorResponseA<T, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Uri, Response = (Io1, Protocol), Error = ConnectError>,
{
fut: <ConnectionPool<T, Io1> as Service>::Future,
_t: PhantomData<Io2>,
}
impl<T, Io1, Io2> Future for InnerConnectorResponseA<T, Io1, Io2>
where
T: Service<Request = Uri, Response = (Io1, Protocol), Error = ConnectError>,
Io1: AsyncRead + AsyncWrite + 'static,
Io2: AsyncRead + AsyncWrite + 'static,
{
type Item = EitherConnection<Io1, Io2>;
type Error = ConnectError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll()? {
Async::NotReady => Ok(Async::NotReady),
Async::Ready(res) => Ok(Async::Ready(EitherConnection::A(res))),
}
}
}
pub(crate) struct InnerConnectorResponseB<T, Io1, Io2>
where
Io2: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Uri, Response = (Io2, Protocol), Error = ConnectError>,
{
fut: <ConnectionPool<T, Io2> as Service>::Future,
_t: PhantomData<Io1>,
}
impl<T, Io1, Io2> Future for InnerConnectorResponseB<T, Io1, Io2>
where
T: Service<Request = Uri, Response = (Io2, Protocol), Error = ConnectError>,
Io1: AsyncRead + AsyncWrite + 'static,
Io2: AsyncRead + AsyncWrite + 'static,
{
type Item = EitherConnection<Io1, Io2>;
type Error = ConnectError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll()? {
Async::NotReady => Ok(Async::NotReady),
Async::Ready(res) => Ok(Async::Ready(EitherConnection::B(res))),
}
}
}
}


@ -0,0 +1,130 @@
use std::io;
use derive_more::{Display, From};
use trust_dns_resolver::error::ResolveError;
#[cfg(feature = "ssl")]
use openssl::ssl::{Error as SslError, HandshakeError};
use crate::error::{Error, ParseError, ResponseError};
use crate::http::Error as HttpError;
use crate::response::Response;
/// A set of errors that can occur while connecting to an HTTP host
#[derive(Debug, Display, From)]
pub enum ConnectError {
/// SSL feature is not enabled
#[display(fmt = "SSL is not supported")]
SslIsNotSupported,
/// SSL error
#[cfg(feature = "ssl")]
#[display(fmt = "{}", _0)]
SslError(SslError),
/// Failed to resolve the hostname
#[display(fmt = "Failed resolving hostname: {}", _0)]
Resolver(ResolveError),
/// No dns records
#[display(fmt = "No dns records found for the input")]
NoRecords,
/// Http2 error
#[display(fmt = "{}", _0)]
H2(h2::Error),
/// Connecting took too long
#[display(fmt = "Timeout out while establishing connection")]
Timeout,
/// Connector has been disconnected
#[display(fmt = "Internal error: connector has been disconnected")]
Disconnected,
/// Unresolved host name
#[display(fmt = "Connector received `Connect` method with unresolved host")]
Unresolverd,
/// Connection io error
#[display(fmt = "{}", _0)]
Io(io::Error),
}
impl From<actix_connect::ConnectError> for ConnectError {
fn from(err: actix_connect::ConnectError) -> ConnectError {
match err {
actix_connect::ConnectError::Resolver(e) => ConnectError::Resolver(e),
actix_connect::ConnectError::NoRecords => ConnectError::NoRecords,
actix_connect::ConnectError::InvalidInput => panic!(),
actix_connect::ConnectError::Unresolverd => ConnectError::Unresolverd,
actix_connect::ConnectError::Io(e) => ConnectError::Io(e),
}
}
}
#[cfg(feature = "ssl")]
impl<T> From<HandshakeError<T>> for ConnectError {
fn from(err: HandshakeError<T>) -> ConnectError {
match err {
HandshakeError::SetupFailure(stack) => SslError::from(stack).into(),
HandshakeError::Failure(stream) => stream.into_error().into(),
HandshakeError::WouldBlock(stream) => stream.into_error().into(),
}
}
}
#[derive(Debug, Display, From)]
pub enum InvalidUrl {
#[display(fmt = "Missing url scheme")]
MissingScheme,
#[display(fmt = "Unknown url scheme")]
UnknownScheme,
#[display(fmt = "Missing host name")]
MissingHost,
#[display(fmt = "Url parse error: {}", _0)]
HttpError(http::Error),
}
/// A set of errors that can occur during request sending and response reading
#[derive(Debug, Display, From)]
pub enum SendRequestError {
/// Invalid URL
#[display(fmt = "Invalid URL: {}", _0)]
Url(InvalidUrl),
/// Failed to connect to host
#[display(fmt = "Failed to connect to host: {}", _0)]
Connect(ConnectError),
/// Error sending request
Send(io::Error),
/// Error parsing response
Response(ParseError),
/// Http error
#[display(fmt = "{}", _0)]
Http(HttpError),
/// Http2 error
#[display(fmt = "{}", _0)]
H2(h2::Error),
/// Response took too long
#[display(fmt = "Timeout out while waiting for response")]
Timeout,
/// Tunnels are not supported for http2 connection
#[display(fmt = "Tunnels are not supported for http2 connection")]
TunnelNotSupported,
/// Error sending request body
Body(Error),
}
/// Convert `SendRequestError` to a server `Response`
impl ResponseError for SendRequestError {
fn error_response(&self) -> Response {
match *self {
SendRequestError::Connect(ConnectError::Timeout) => {
Response::GatewayTimeout()
}
SendRequestError::Connect(_) => Response::BadGateway(),
_ => Response::InternalServerError(),
}
.into()
}
}


@ -0,0 +1,297 @@
use std::io::Write;
use std::{io, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use bytes::{BufMut, Bytes, BytesMut};
use futures::future::{ok, Either};
use futures::{Async, Future, Poll, Sink, Stream};
use crate::error::PayloadError;
use crate::h1;
use crate::http::header::{IntoHeaderValue, HOST};
use crate::message::{RequestHead, ResponseHead};
use crate::payload::{Payload, PayloadStream};
use super::connection::{ConnectionLifetime, ConnectionType, IoConnection};
use super::error::{ConnectError, SendRequestError};
use super::pool::Acquired;
use crate::body::{BodySize, MessageBody};
pub(crate) fn send_request<T, B>(
io: T,
mut head: RequestHead,
body: B,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> impl Future<Item = (ResponseHead, Payload), Error = SendRequestError>
where
T: AsyncRead + AsyncWrite + 'static,
B: MessageBody,
{
// set request host header
if !head.headers.contains_key(HOST) {
if let Some(host) = head.uri.host() {
let mut wrt = BytesMut::with_capacity(host.len() + 5).writer();
let _ = match head.uri.port_u16() {
None | Some(80) | Some(443) => write!(wrt, "{}", host),
Some(port) => write!(wrt, "{}:{}", host, port),
};
match wrt.get_mut().take().freeze().try_into() {
Ok(value) => {
head.headers.insert(HOST, value);
}
Err(e) => {
log::error!("Can not set HOST header {}", e);
}
}
}
}
let io = H1Connection {
created,
pool,
io: Some(io),
};
let len = body.size();
// create Framed and send request
Framed::new(io, h1::ClientCodec::default())
.send((head, len).into())
.from_err()
// send request body
.and_then(move |framed| match body.size() {
BodySize::None | BodySize::Empty | BodySize::Sized(0) => {
Either::A(ok(framed))
}
_ => Either::B(SendBody::new(body, framed)),
})
// read response and init read body
.and_then(|framed| {
framed
.into_future()
.map_err(|(e, _)| SendRequestError::from(e))
.and_then(|(item, framed)| {
if let Some(res) = item {
match framed.get_codec().message_type() {
h1::MessageType::None => {
let force_close = !framed.get_codec().keepalive();
release_connection(framed, force_close);
Ok((res, Payload::None))
}
_ => {
let pl: PayloadStream = Box::new(PlStream::new(framed));
Ok((res, pl.into()))
}
}
} else {
Err(ConnectError::Disconnected.into())
}
})
})
}
pub(crate) fn open_tunnel<T>(
io: T,
head: RequestHead,
) -> impl Future<Item = (ResponseHead, Framed<T, h1::ClientCodec>), Error = SendRequestError>
where
T: AsyncRead + AsyncWrite + 'static,
{
// create Framed and send request
Framed::new(io, h1::ClientCodec::default())
.send((head, BodySize::None).into())
.from_err()
// read response
.and_then(|framed| {
framed
.into_future()
.map_err(|(e, _)| SendRequestError::from(e))
.and_then(|(head, framed)| {
if let Some(head) = head {
Ok((head, framed))
} else {
Err(SendRequestError::from(ConnectError::Disconnected))
}
})
})
}
#[doc(hidden)]
/// HTTP client connection
pub struct H1Connection<T> {
io: Option<T>,
created: time::Instant,
pool: Option<Acquired<T>>,
}
impl<T: AsyncRead + AsyncWrite + 'static> ConnectionLifetime for H1Connection<T> {
/// Close connection
fn close(&mut self) {
if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() {
pool.close(IoConnection::new(
ConnectionType::H1(io),
self.created,
None,
));
}
}
}
/// Release this connection to the connection pool
fn release(&mut self) {
if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() {
pool.release(IoConnection::new(
ConnectionType::H1(io),
self.created,
None,
));
}
}
}
}
impl<T: AsyncRead + AsyncWrite + 'static> io::Read for H1Connection<T> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.io.as_mut().unwrap().read(buf)
}
}
impl<T: AsyncRead + AsyncWrite + 'static> AsyncRead for H1Connection<T> {}
impl<T: AsyncRead + AsyncWrite + 'static> io::Write for H1Connection<T> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.io.as_mut().unwrap().write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.io.as_mut().unwrap().flush()
}
}
impl<T: AsyncRead + AsyncWrite + 'static> AsyncWrite for H1Connection<T> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
self.io.as_mut().unwrap().shutdown()
}
}
/// Future responsible for sending request body to the peer
pub(crate) struct SendBody<I, B> {
body: Option<B>,
framed: Option<Framed<I, h1::ClientCodec>>,
flushed: bool,
}
impl<I, B> SendBody<I, B>
where
I: AsyncRead + AsyncWrite + 'static,
B: MessageBody,
{
pub(crate) fn new(body: B, framed: Framed<I, h1::ClientCodec>) -> Self {
SendBody {
body: Some(body),
framed: Some(framed),
flushed: true,
}
}
}
impl<I, B> Future for SendBody<I, B>
where
I: ConnectionLifetime,
B: MessageBody,
{
type Item = Framed<I, h1::ClientCodec>;
type Error = SendRequestError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let mut body_ready = true;
loop {
while body_ready
&& self.body.is_some()
&& !self.framed.as_ref().unwrap().is_write_buf_full()
{
match self.body.as_mut().unwrap().poll_next()? {
Async::Ready(item) => {
// check if body is done
if item.is_none() {
let _ = self.body.take();
}
self.flushed = false;
self.framed
.as_mut()
.unwrap()
.force_send(h1::Message::Chunk(item))?;
break;
}
Async::NotReady => body_ready = false,
}
}
if !self.flushed {
match self.framed.as_mut().unwrap().poll_complete()? {
Async::Ready(_) => {
self.flushed = true;
continue;
}
Async::NotReady => return Ok(Async::NotReady),
}
}
if self.body.is_none() {
return Ok(Async::Ready(self.framed.take().unwrap()));
}
return Ok(Async::NotReady);
}
}
}
pub(crate) struct PlStream<Io> {
framed: Option<Framed<Io, h1::ClientPayloadCodec>>,
}
impl<Io: ConnectionLifetime> PlStream<Io> {
fn new(framed: Framed<Io, h1::ClientCodec>) -> Self {
PlStream {
framed: Some(framed.map_codec(|codec| codec.into_payload_codec())),
}
}
}
impl<Io: ConnectionLifetime> Stream for PlStream<Io> {
type Item = Bytes;
type Error = PayloadError;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
match self.framed.as_mut().unwrap().poll()? {
Async::NotReady => Ok(Async::NotReady),
Async::Ready(Some(chunk)) => {
if let Some(chunk) = chunk {
Ok(Async::Ready(Some(chunk)))
} else {
let framed = self.framed.take().unwrap();
let force_close = !framed.get_codec().keepalive();
release_connection(framed, force_close);
Ok(Async::Ready(None))
}
}
Async::Ready(None) => Ok(Async::Ready(None)),
}
}
}
fn release_connection<T, U>(framed: Framed<T, U>, force_close: bool)
where
T: ConnectionLifetime,
{
let mut parts = framed.into_parts();
if !force_close && parts.read_buf.is_empty() && parts.write_buf.is_empty() {
parts.io.release()
} else {
parts.io.close()
}
}


@ -0,0 +1,183 @@
use std::time;
use actix_codec::{AsyncRead, AsyncWrite};
use bytes::Bytes;
use futures::future::{err, Either};
use futures::{Async, Future, Poll};
use h2::{client::SendRequest, SendStream};
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, TRANSFER_ENCODING};
use http::{request::Request, HttpTryFrom, Method, Version};
use crate::body::{BodySize, MessageBody};
use crate::message::{RequestHead, ResponseHead};
use crate::payload::Payload;
use super::connection::{ConnectionType, IoConnection};
use super::error::SendRequestError;
use super::pool::Acquired;
pub(crate) fn send_request<T, B>(
io: SendRequest<Bytes>,
head: RequestHead,
body: B,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> impl Future<Item = (ResponseHead, Payload), Error = SendRequestError>
where
T: AsyncRead + AsyncWrite + 'static,
B: MessageBody,
{
trace!("Sending client request: {:?} {:?}", head, body.size());
let head_req = head.method == Method::HEAD;
let length = body.size();
let eof = match length {
BodySize::None | BodySize::Empty | BodySize::Sized(0) => true,
_ => false,
};
io.ready()
.map_err(SendRequestError::from)
.and_then(move |mut io| {
let mut req = Request::new(());
*req.uri_mut() = head.uri;
*req.method_mut() = head.method;
*req.version_mut() = Version::HTTP_2;
let mut skip_len = true;
// let mut has_date = false;
// Content length
let _ = match length {
BodySize::None => None,
BodySize::Stream => {
skip_len = false;
None
}
BodySize::Empty => req
.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0")),
BodySize::Sized(len) => req.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
BodySize::Sized64(len) => req.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
};
// copy headers
for (key, value) in head.headers.iter() {
match *key {
CONNECTION | TRANSFER_ENCODING => continue, // http2 specific
CONTENT_LENGTH if skip_len => continue,
// DATE => has_date = true,
_ => (),
}
req.headers_mut().append(key, value.clone());
}
match io.send_request(req, eof) {
Ok((res, send)) => {
release(io, pool, created, false);
if !eof {
Either::A(Either::B(
SendBody {
body,
send,
buf: None,
}
.and_then(move |_| res.map_err(SendRequestError::from)),
))
} else {
Either::B(res.map_err(SendRequestError::from))
}
}
Err(e) => {
release(io, pool, created, e.is_io());
Either::A(Either::A(err(e.into())))
}
}
})
.and_then(move |resp| {
let (parts, body) = resp.into_parts();
let payload = if head_req { Payload::None } else { body.into() };
let mut head = ResponseHead::new(parts.status);
head.version = parts.version;
head.headers = parts.headers.into();
Ok((head, payload))
})
.from_err()
}
struct SendBody<B: MessageBody> {
body: B,
send: SendStream<Bytes>,
buf: Option<Bytes>,
}
impl<B: MessageBody> Future for SendBody<B> {
type Item = ();
type Error = SendRequestError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
if self.buf.is_none() {
match self.body.poll_next() {
Ok(Async::Ready(Some(buf))) => {
self.send.reserve_capacity(buf.len());
self.buf = Some(buf);
}
Ok(Async::Ready(None)) => {
if let Err(e) = self.send.send_data(Bytes::new(), true) {
return Err(e.into());
}
self.send.reserve_capacity(0);
return Ok(Async::Ready(()));
}
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(e) => return Err(e.into()),
}
}
match self.send.poll_capacity() {
Ok(Async::NotReady) => return Ok(Async::NotReady),
Ok(Async::Ready(None)) => return Ok(Async::Ready(())),
Ok(Async::Ready(Some(cap))) => {
let mut buf = self.buf.take().unwrap();
let len = buf.len();
let bytes = buf.split_to(std::cmp::min(cap, len));
if let Err(e) = self.send.send_data(bytes, false) {
return Err(e.into());
} else {
if !buf.is_empty() {
self.send.reserve_capacity(buf.len());
self.buf = Some(buf);
}
continue;
}
}
Err(e) => return Err(e.into()),
}
}
}
}
// release SendRequest object
fn release<T: AsyncRead + AsyncWrite + 'static>(
io: SendRequest<Bytes>,
pool: Option<Acquired<T>>,
created: time::Instant,
close: bool,
) {
if let Some(mut pool) = pool {
if close {
pool.close(IoConnection::new(ConnectionType::H2(io), created, None));
} else {
pool.release(IoConnection::new(ConnectionType::H2(io), created, None));
}
}
}


@ -0,0 +1,12 @@
//! Http client api
mod connection;
mod connector;
mod error;
mod h1proto;
mod h2proto;
mod pool;
pub use self::connection::Connection;
pub use self::connector::Connector;
pub use self::error::{ConnectError, InvalidUrl, SendRequestError};
pub use self::pool::Protocol;


@ -0,0 +1,479 @@
use std::cell::RefCell;
use std::collections::VecDeque;
use std::io;
use std::rc::Rc;
use std::time::{Duration, Instant};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_service::Service;
use bytes::Bytes;
use futures::future::{err, ok, Either, FutureResult};
use futures::task::AtomicTask;
use futures::unsync::oneshot;
use futures::{Async, Future, Poll};
use h2::client::{handshake, Handshake};
use hashbrown::HashMap;
use http::uri::{Authority, Uri};
use indexmap::IndexSet;
use slab::Slab;
use tokio_timer::{sleep, Delay};
use super::connection::{ConnectionType, IoConnection};
use super::error::ConnectError;
#[derive(Clone, Copy, PartialEq)]
/// Protocol version
pub enum Protocol {
Http1,
Http2,
}
#[derive(Hash, Eq, PartialEq, Clone, Debug)]
pub(crate) struct Key {
authority: Authority,
}
impl From<Authority> for Key {
fn from(authority: Authority) -> Key {
Key { authority }
}
}
/// Connections pool
pub(crate) struct ConnectionPool<T, Io: AsyncRead + AsyncWrite + 'static>(
T,
Rc<RefCell<Inner<Io>>>,
);
impl<T, Io> ConnectionPool<T, Io>
where
Io: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Uri, Response = (Io, Protocol), Error = ConnectError>,
{
pub(crate) fn new(
connector: T,
conn_lifetime: Duration,
conn_keep_alive: Duration,
disconnect_timeout: Option<Duration>,
limit: usize,
) -> Self {
ConnectionPool(
connector,
Rc::new(RefCell::new(Inner {
conn_lifetime,
conn_keep_alive,
disconnect_timeout,
limit,
acquired: 0,
waiters: Slab::new(),
waiters_queue: IndexSet::new(),
available: HashMap::new(),
task: AtomicTask::new(),
})),
)
}
}
impl<T, Io> Clone for ConnectionPool<T, Io>
where
T: Clone,
Io: AsyncRead + AsyncWrite + 'static,
{
fn clone(&self) -> Self {
ConnectionPool(self.0.clone(), self.1.clone())
}
}
impl<T, Io> Service for ConnectionPool<T, Io>
where
Io: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Uri, Response = (Io, Protocol), Error = ConnectError>,
{
type Request = Uri;
type Response = IoConnection<Io>;
type Error = ConnectError;
type Future = Either<
FutureResult<Self::Response, Self::Error>,
Either<WaitForConnection<Io>, OpenConnection<T::Future, Io>>,
>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.0.poll_ready()
}
fn call(&mut self, req: Uri) -> Self::Future {
let key = if let Some(authority) = req.authority_part() {
authority.clone().into()
} else {
return Either::A(err(ConnectError::Unresolverd));
};
// acquire connection
match self.1.as_ref().borrow_mut().acquire(&key) {
Acquire::Acquired(io, created) => {
// use existing connection
Either::A(ok(IoConnection::new(
io,
created,
Some(Acquired(key, Some(self.1.clone()))),
)))
}
Acquire::NotAvailable => {
// connection is not available, wait
let (rx, token) = self.1.as_ref().borrow_mut().wait_for(req);
Either::B(Either::A(WaitForConnection {
rx,
key,
token,
inner: Some(self.1.clone()),
}))
}
Acquire::Available => {
// open new connection
Either::B(Either::B(OpenConnection::new(
key,
self.1.clone(),
self.0.call(req),
)))
}
}
}
}
#[doc(hidden)]
pub struct WaitForConnection<Io>
where
Io: AsyncRead + AsyncWrite + 'static,
{
key: Key,
token: usize,
rx: oneshot::Receiver<Result<IoConnection<Io>, ConnectError>>,
inner: Option<Rc<RefCell<Inner<Io>>>>,
}
impl<Io> Drop for WaitForConnection<Io>
where
Io: AsyncRead + AsyncWrite + 'static,
{
fn drop(&mut self) {
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release_waiter(&self.key, self.token);
inner.check_availibility();
}
}
}
impl<Io> Future for WaitForConnection<Io>
where
Io: AsyncRead + AsyncWrite,
{
type Item = IoConnection<Io>;
type Error = ConnectError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.rx.poll() {
Ok(Async::Ready(item)) => match item {
Err(err) => Err(err),
Ok(conn) => {
let _ = self.inner.take();
Ok(Async::Ready(conn))
}
},
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(_) => {
let _ = self.inner.take();
Err(ConnectError::Disconnected)
}
}
}
}
#[doc(hidden)]
pub struct OpenConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + 'static,
{
fut: F,
key: Key,
h2: Option<Handshake<Io, Bytes>>,
inner: Option<Rc<RefCell<Inner<Io>>>>,
}
impl<F, Io> OpenConnection<F, Io>
where
F: Future<Item = (Io, Protocol), Error = ConnectError>,
Io: AsyncRead + AsyncWrite + 'static,
{
fn new(key: Key, inner: Rc<RefCell<Inner<Io>>>, fut: F) -> Self {
OpenConnection {
key,
fut,
inner: Some(inner),
h2: None,
}
}
}
impl<F, Io> Drop for OpenConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + 'static,
{
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
let mut inner = inner.as_ref().borrow_mut();
inner.release();
inner.check_availibility();
}
}
}
impl<F, Io> Future for OpenConnection<F, Io>
where
F: Future<Item = (Io, Protocol), Error = ConnectError>,
Io: AsyncRead + AsyncWrite,
{
type Item = IoConnection<Io>;
type Error = ConnectError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(ref mut h2) = self.h2 {
return match h2.poll() {
Ok(Async::Ready((snd, connection))) => {
tokio_current_thread::spawn(connection.map_err(|_| ()));
Ok(Async::Ready(IoConnection::new(
ConnectionType::H2(snd),
Instant::now(),
Some(Acquired(self.key.clone(), self.inner.clone())),
)))
}
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(e) => Err(e.into()),
};
}
match self.fut.poll() {
Err(err) => Err(err),
Ok(Async::Ready((io, proto))) => {
let _ = self.inner.take();
if proto == Protocol::Http1 {
Ok(Async::Ready(IoConnection::new(
ConnectionType::H1(io),
Instant::now(),
Some(Acquired(self.key.clone(), self.inner.clone())),
)))
} else {
self.h2 = Some(handshake(io));
self.poll()
}
}
Ok(Async::NotReady) => Ok(Async::NotReady),
}
}
}
enum Acquire<T> {
Acquired(ConnectionType<T>, Instant),
Available,
NotAvailable,
}
// #[derive(Debug)]
struct AvailableConnection<Io> {
io: ConnectionType<Io>,
used: Instant,
created: Instant,
}
pub(crate) struct Inner<Io> {
conn_lifetime: Duration,
conn_keep_alive: Duration,
disconnect_timeout: Option<Duration>,
limit: usize,
acquired: usize,
available: HashMap<Key, VecDeque<AvailableConnection<Io>>>,
waiters: Slab<(Uri, oneshot::Sender<Result<IoConnection<Io>, ConnectError>>)>,
waiters_queue: IndexSet<(Key, usize)>,
task: AtomicTask,
}
impl<Io> Inner<Io> {
fn reserve(&mut self) {
self.acquired += 1;
}
fn release(&mut self) {
self.acquired -= 1;
}
fn release_waiter(&mut self, key: &Key, token: usize) {
self.waiters.remove(token);
self.waiters_queue.remove(&(key.clone(), token));
}
fn release_conn(&mut self, key: &Key, io: ConnectionType<Io>, created: Instant) {
self.acquired -= 1;
self.available
.entry(key.clone())
.or_insert_with(VecDeque::new)
.push_back(AvailableConnection {
io,
created,
used: Instant::now(),
});
}
}
impl<Io> Inner<Io>
where
Io: AsyncRead + AsyncWrite + 'static,
{
/// connection is not available, wait
fn wait_for(
&mut self,
connect: Uri,
) -> (
oneshot::Receiver<Result<IoConnection<Io>, ConnectError>>,
usize,
) {
let (tx, rx) = oneshot::channel();
let key: Key = connect.authority_part().unwrap().clone().into();
let entry = self.waiters.vacant_entry();
let token = entry.key();
entry.insert((connect, tx));
assert!(self.waiters_queue.insert((key, token)));
(rx, token)
}
fn acquire(&mut self, key: &Key) -> Acquire<Io> {
// check limits
if self.limit > 0 && self.acquired >= self.limit {
return Acquire::NotAvailable;
}
self.reserve();
// check if open connection is available
// cleanup stale connections at the same time
if let Some(ref mut connections) = self.available.get_mut(key) {
let now = Instant::now();
while let Some(conn) = connections.pop_back() {
// check if it is still usable
if (now - conn.used) > self.conn_keep_alive
|| (now - conn.created) > self.conn_lifetime
{
if let Some(timeout) = self.disconnect_timeout {
if let ConnectionType::H1(io) = conn.io {
tokio_current_thread::spawn(CloseConnection::new(
io, timeout,
))
}
}
} else {
let mut io = conn.io;
let mut buf = [0; 2];
if let ConnectionType::H1(ref mut s) = io {
match s.read(&mut buf) {
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => (),
Ok(n) if n > 0 => {
if let Some(timeout) = self.disconnect_timeout {
if let ConnectionType::H1(io) = io {
tokio_current_thread::spawn(
CloseConnection::new(io, timeout),
)
}
}
continue;
}
Ok(_) | Err(_) => continue,
}
}
return Acquire::Acquired(io, conn.created);
}
}
}
Acquire::Available
}
fn release_close(&mut self, io: ConnectionType<Io>) {
self.acquired -= 1;
if let Some(timeout) = self.disconnect_timeout {
if let ConnectionType::H1(io) = io {
tokio_current_thread::spawn(CloseConnection::new(io, timeout))
}
}
}
fn check_availibility(&self) {
if !self.waiters_queue.is_empty() && self.acquired < self.limit {
self.task.notify()
}
}
}
struct CloseConnection<T> {
io: T,
timeout: Delay,
}
impl<T> CloseConnection<T>
where
T: AsyncWrite,
{
fn new(io: T, timeout: Duration) -> Self {
CloseConnection {
io,
timeout: sleep(timeout),
}
}
}
impl<T> Future for CloseConnection<T>
where
T: AsyncWrite,
{
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<(), ()> {
match self.timeout.poll() {
Ok(Async::Ready(_)) | Err(_) => Ok(Async::Ready(())),
Ok(Async::NotReady) => match self.io.shutdown() {
Ok(Async::Ready(_)) | Err(_) => Ok(Async::Ready(())),
Ok(Async::NotReady) => Ok(Async::NotReady),
},
}
}
}
pub(crate) struct Acquired<T>(Key, Option<Rc<RefCell<Inner<T>>>>);
impl<T> Acquired<T>
where
T: AsyncRead + AsyncWrite + 'static,
{
pub(crate) fn close(&mut self, conn: IoConnection<T>) {
if let Some(inner) = self.1.take() {
let (io, _) = conn.into_inner();
inner.as_ref().borrow_mut().release_close(io);
}
}
pub(crate) fn release(&mut self, conn: IoConnection<T>) {
if let Some(inner) = self.1.take() {
let (io, created) = conn.into_inner();
inner
.as_ref()
.borrow_mut()
.release_conn(&self.0, io, created);
}
}
}
impl<T> Drop for Acquired<T> {
fn drop(&mut self) {
if let Some(inner) = self.1.take() {
inner.as_ref().borrow_mut().release();
}
}
}

actix-http/src/config.rs

@ -0,0 +1,290 @@
use std::cell::UnsafeCell;
use std::fmt;
use std::fmt::Write;
use std::rc::Rc;
use std::time::{Duration, Instant};
use bytes::BytesMut;
use futures::{future, Future};
use time;
use tokio_timer::{sleep, Delay};
// "Sun, 06 Nov 1994 08:49:37 GMT".len()
const DATE_VALUE_LENGTH: usize = 29;
#[derive(Debug, PartialEq, Clone, Copy)]
/// Server keep-alive setting
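///
/// Values can also be converted from integers and `Option<usize>` via the
/// `From` impls below (illustrative sketch; the crate-root import path is an
/// assumption):
///
/// ```rust,ignore
/// use actix_http::KeepAlive;
///
/// assert_eq!(KeepAlive::from(5usize), KeepAlive::Timeout(5));
/// assert_eq!(KeepAlive::from(Some(10usize)), KeepAlive::Timeout(10));
/// assert_eq!(KeepAlive::from(None::<usize>), KeepAlive::Disabled);
/// ```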
pub enum KeepAlive {
/// Keep alive in seconds
Timeout(usize),
/// Rely on the OS to shut down the TCP connection
Os,
/// Disabled
Disabled,
}
impl From<usize> for KeepAlive {
fn from(keepalive: usize) -> Self {
KeepAlive::Timeout(keepalive)
}
}
impl From<Option<usize>> for KeepAlive {
fn from(keepalive: Option<usize>) -> Self {
if let Some(keepalive) = keepalive {
KeepAlive::Timeout(keepalive)
} else {
KeepAlive::Disabled
}
}
}
/// Http service configuration
pub struct ServiceConfig(Rc<Inner>);
struct Inner {
keep_alive: Option<Duration>,
client_timeout: u64,
client_disconnect: u64,
ka_enabled: bool,
timer: DateService,
}
impl Clone for ServiceConfig {
fn clone(&self) -> Self {
ServiceConfig(self.0.clone())
}
}
impl Default for ServiceConfig {
fn default() -> Self {
Self::new(KeepAlive::Timeout(5), 0, 0)
}
}
impl ServiceConfig {
/// Create instance of `ServiceConfig`
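///
/// A minimal sketch (assuming this type is re-exported as
/// `actix_http::ServiceConfig`):
///
/// ```rust,ignore
/// use actix_http::{KeepAlive, ServiceConfig};
///
/// // 5 second keep-alive, 5000 ms client timeout, no client disconnect timeout.
/// let config = ServiceConfig::new(KeepAlive::Timeout(5), 5000, 0);
/// assert!(config.keep_alive_enabled());
/// ```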
pub fn new(
keep_alive: KeepAlive,
client_timeout: u64,
client_disconnect: u64,
) -> ServiceConfig {
let (keep_alive, ka_enabled) = match keep_alive {
KeepAlive::Timeout(val) => (val as u64, true),
KeepAlive::Os => (0, true),
KeepAlive::Disabled => (0, false),
};
let keep_alive = if ka_enabled && keep_alive > 0 {
Some(Duration::from_secs(keep_alive))
} else {
None
};
ServiceConfig(Rc::new(Inner {
keep_alive,
ka_enabled,
client_timeout,
client_disconnect,
timer: DateService::new(),
}))
}
#[inline]
/// Keep alive duration if configured.
pub fn keep_alive(&self) -> Option<Duration> {
self.0.keep_alive
}
#[inline]
/// Return the state of connection keep-alive functionality
pub fn keep_alive_enabled(&self) -> bool {
self.0.ka_enabled
}
#[inline]
/// Client timeout for first request.
pub fn client_timer(&self) -> Option<Delay> {
let delay = self.0.client_timeout;
if delay != 0 {
Some(Delay::new(
self.0.timer.now() + Duration::from_millis(delay),
))
} else {
None
}
}
/// Client timeout expiration time for the first request.
pub fn client_timer_expire(&self) -> Option<Instant> {
let delay = self.0.client_timeout;
if delay != 0 {
Some(self.0.timer.now() + Duration::from_millis(delay))
} else {
None
}
}
/// Client disconnect timer
pub fn client_disconnect_timer(&self) -> Option<Instant> {
let delay = self.0.client_disconnect;
if delay != 0 {
Some(self.0.timer.now() + Duration::from_millis(delay))
} else {
None
}
}
#[inline]
/// Return keep-alive timer delay if configured.
pub fn keep_alive_timer(&self) -> Option<Delay> {
if let Some(ka) = self.0.keep_alive {
Some(Delay::new(self.0.timer.now() + ka))
} else {
None
}
}
/// Keep-alive expire time
pub fn keep_alive_expire(&self) -> Option<Instant> {
if let Some(ka) = self.0.keep_alive {
Some(self.0.timer.now() + ka)
} else {
None
}
}
#[inline]
pub(crate) fn now(&self) -> Instant {
self.0.timer.now()
}
pub(crate) fn set_date(&self, dst: &mut BytesMut) {
let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
buf[6..35].copy_from_slice(&self.0.timer.date().bytes);
buf[35..].copy_from_slice(b"\r\n\r\n");
dst.extend_from_slice(&buf);
}
pub(crate) fn set_date_header(&self, dst: &mut BytesMut) {
dst.extend_from_slice(&self.0.timer.date().bytes);
}
}
struct Date {
bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
impl Date {
fn new() -> Date {
let mut date = Date {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
date.update();
date
}
fn update(&mut self) {
self.pos = 0;
write!(self, "{}", time::at_utc(time::get_time()).rfc822()).unwrap();
}
}
impl fmt::Write for Date {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
}
}
#[derive(Clone)]
struct DateService(Rc<DateServiceInner>);
struct DateServiceInner {
current: UnsafeCell<Option<(Date, Instant)>>,
}
impl DateServiceInner {
fn new() -> Self {
DateServiceInner {
current: UnsafeCell::new(None),
}
}
fn get_ref(&self) -> &Option<(Date, Instant)> {
unsafe { &*self.current.get() }
}
fn reset(&self) {
unsafe { (&mut *self.current.get()).take() };
}
fn update(&self) {
let now = Instant::now();
let date = Date::new();
*(unsafe { &mut *self.current.get() }) = Some((date, now));
}
}
impl DateService {
fn new() -> Self {
DateService(Rc::new(DateServiceInner::new()))
}
fn check_date(&self) {
if self.0.get_ref().is_none() {
self.0.update();
// periodic date update
let s = self.clone();
tokio_current_thread::spawn(sleep(Duration::from_millis(500)).then(
move |_| {
s.0.reset();
future::ok(())
},
));
}
}
fn now(&self) -> Instant {
self.check_date();
self.0.get_ref().as_ref().unwrap().1
}
fn date(&self) -> &Date {
self.check_date();
let item = self.0.get_ref().as_ref().unwrap();
&item.0
}
}
#[cfg(test)]
mod tests {
use super::*;
use actix_rt::System;
use futures::future;
#[test]
fn test_date_len() {
assert_eq!(DATE_VALUE_LENGTH, "Sun, 06 Nov 1994 08:49:37 GMT".len());
}
#[test]
fn test_date() {
let mut rt = System::new("test");
let _ = rt.block_on(future::lazy(|| {
let settings = ServiceConfig::new(KeepAlive::Os, 0, 0);
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf1);
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf2);
assert_eq!(buf1, buf2);
future::ok::<_, ()>(())
}));
}
}


@ -0,0 +1,240 @@
use std::borrow::Cow;
use time::{Duration, Tm};
use super::{Cookie, SameSite};
/// Structure that follows the builder pattern for building `Cookie` structs.
///
/// To construct a cookie:
///
/// 1. Call [`Cookie::build`](struct.Cookie.html#method.build) to start building.
/// 2. Use any of the builder methods to set fields in the cookie.
/// 3. Call [finish](#method.finish) to retrieve the built cookie.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
/// use time::Duration;
///
/// # fn main() {
/// let cookie: Cookie = Cookie::build("name", "value")
/// .domain("www.rust-lang.org")
/// .path("/")
/// .secure(true)
/// .http_only(true)
/// .max_age(Duration::days(1))
/// .finish();
/// # }
/// ```
#[derive(Debug, Clone)]
pub struct CookieBuilder {
/// The cookie being built.
cookie: Cookie<'static>,
}
impl CookieBuilder {
/// Creates a new `CookieBuilder` instance from the given name and value.
///
/// This method is typically called indirectly via
/// [Cookie::build](struct.Cookie.html#method.build).
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// let c = Cookie::build("foo", "bar").finish();
/// assert_eq!(c.name_value(), ("foo", "bar"));
/// ```
pub fn new<N, V>(name: N, value: V) -> CookieBuilder
where
N: Into<Cow<'static, str>>,
V: Into<Cow<'static, str>>,
{
CookieBuilder {
cookie: Cookie::new(name, value),
}
}
/// Sets the `expires` field in the cookie being built.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// # fn main() {
/// let c = Cookie::build("foo", "bar")
/// .expires(time::now())
/// .finish();
///
/// assert!(c.expires().is_some());
/// # }
/// ```
#[inline]
pub fn expires(mut self, when: Tm) -> CookieBuilder {
self.cookie.set_expires(when);
self
}
/// Sets the `max_age` field in the cookie being built.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// # fn main() {
/// let c = Cookie::build("foo", "bar")
/// .max_age(time::Duration::minutes(30))
/// .finish();
///
/// assert_eq!(c.max_age(), Some(time::Duration::seconds(30 * 60)));
/// # }
/// ```
#[inline]
pub fn max_age(mut self, value: Duration) -> CookieBuilder {
self.cookie.set_max_age(value);
self
}
/// Sets the `domain` field in the cookie being built.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// let c = Cookie::build("foo", "bar")
/// .domain("www.rust-lang.org")
/// .finish();
///
/// assert_eq!(c.domain(), Some("www.rust-lang.org"));
/// ```
pub fn domain<D: Into<Cow<'static, str>>>(mut self, value: D) -> CookieBuilder {
self.cookie.set_domain(value);
self
}
/// Sets the `path` field in the cookie being built.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// let c = Cookie::build("foo", "bar")
/// .path("/")
/// .finish();
///
/// assert_eq!(c.path(), Some("/"));
/// ```
pub fn path<P: Into<Cow<'static, str>>>(mut self, path: P) -> CookieBuilder {
self.cookie.set_path(path);
self
}
/// Sets the `secure` field in the cookie being built.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// let c = Cookie::build("foo", "bar")
/// .secure(true)
/// .finish();
///
/// assert_eq!(c.secure(), Some(true));
/// ```
#[inline]
pub fn secure(mut self, value: bool) -> CookieBuilder {
self.cookie.set_secure(value);
self
}
/// Sets the `http_only` field in the cookie being built.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// let c = Cookie::build("foo", "bar")
/// .http_only(true)
/// .finish();
///
/// assert_eq!(c.http_only(), Some(true));
/// ```
#[inline]
pub fn http_only(mut self, value: bool) -> CookieBuilder {
self.cookie.set_http_only(value);
self
}
/// Sets the `same_site` field in the cookie being built.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{Cookie, SameSite};
///
/// let c = Cookie::build("foo", "bar")
/// .same_site(SameSite::Strict)
/// .finish();
///
/// assert_eq!(c.same_site(), Some(SameSite::Strict));
/// ```
#[inline]
pub fn same_site(mut self, value: SameSite) -> CookieBuilder {
self.cookie.set_same_site(value);
self
}
/// Makes the cookie being built 'permanent' by extending its expiration and
/// max age 20 years into the future.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
/// use time::Duration;
///
/// # fn main() {
/// let c = Cookie::build("foo", "bar")
/// .permanent()
/// .finish();
///
/// assert_eq!(c.max_age(), Some(Duration::days(365 * 20)));
/// # assert!(c.expires().is_some());
/// # }
/// ```
#[inline]
pub fn permanent(mut self) -> CookieBuilder {
self.cookie.make_permanent();
self
}
/// Finishes building and returns the built `Cookie`.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Cookie;
///
/// let c = Cookie::build("foo", "bar")
/// .domain("crates.io")
/// .path("/")
/// .finish();
///
/// assert_eq!(c.name_value(), ("foo", "bar"));
/// assert_eq!(c.domain(), Some("crates.io"));
/// assert_eq!(c.path(), Some("/"));
/// ```
#[inline]
pub fn finish(self) -> Cookie<'static> {
self.cookie
}
}


@ -0,0 +1,71 @@
use std::borrow::Borrow;
use std::hash::{Hash, Hasher};
use std::ops::{Deref, DerefMut};
use super::Cookie;
/// A `DeltaCookie` is a helper structure used in a cookie jar. It wraps a
/// `Cookie` so that it can be hashed and compared purely by name. It further
/// records whether the wrapped cookie is a "removal" cookie, that is, a cookie
/// that when sent to the client removes the named cookie on the client's
/// machine.
#[derive(Clone, Debug)]
pub struct DeltaCookie {
pub cookie: Cookie<'static>,
pub removed: bool,
}
impl DeltaCookie {
/// Create a new `DeltaCookie` that is being added to a jar.
#[inline]
pub fn added(cookie: Cookie<'static>) -> DeltaCookie {
DeltaCookie {
cookie,
removed: false,
}
}
/// Create a new `DeltaCookie` that is being removed from a jar. The
/// `cookie` should be a "removal" cookie.
#[inline]
pub fn removed(cookie: Cookie<'static>) -> DeltaCookie {
DeltaCookie {
cookie,
removed: true,
}
}
}
impl Deref for DeltaCookie {
type Target = Cookie<'static>;
fn deref(&self) -> &Cookie<'static> {
&self.cookie
}
}
impl DerefMut for DeltaCookie {
fn deref_mut(&mut self) -> &mut Cookie<'static> {
&mut self.cookie
}
}
impl PartialEq for DeltaCookie {
fn eq(&self, other: &DeltaCookie) -> bool {
self.name() == other.name()
}
}
impl Eq for DeltaCookie {}
impl Hash for DeltaCookie {
fn hash<H: Hasher>(&self, state: &mut H) {
self.name().hash(state);
}
}
impl Borrow<str> for DeltaCookie {
fn borrow(&self) -> &str {
self.name()
}
}


@ -0,0 +1,98 @@
//! This module contains types that represent cookie properties that are not yet
//! standardized. That is, _draft_ features.
use std::fmt;
/// The `SameSite` cookie attribute.
///
/// A cookie with a `SameSite` attribute is subject to restrictions on when it
/// is sent to the origin server in a cross-site request. If the `SameSite`
/// attribute is "Strict", then the cookie is never sent in cross-site requests.
/// If the `SameSite` attribute is "Lax", the cookie is only sent in cross-site
/// requests with "safe" HTTP methods, i.e., `GET`, `HEAD`, `OPTIONS`, `TRACE`.
/// If the `SameSite` attribute is not present (made explicit via the
/// `SameSite::None` variant), then the cookie will be sent as normal.
///
/// **Note:** This cookie attribute is an HTTP draft! Its meaning and definition
/// are subject to change.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum SameSite {
/// The "Strict" `SameSite` attribute.
Strict,
/// The "Lax" `SameSite` attribute.
Lax,
/// No `SameSite` attribute.
None,
}
impl SameSite {
/// Returns `true` if `self` is `SameSite::Strict` and `false` otherwise.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::SameSite;
///
/// let strict = SameSite::Strict;
/// assert!(strict.is_strict());
/// assert!(!strict.is_lax());
/// assert!(!strict.is_none());
/// ```
#[inline]
pub fn is_strict(self) -> bool {
match self {
SameSite::Strict => true,
SameSite::Lax | SameSite::None => false,
}
}
/// Returns `true` if `self` is `SameSite::Lax` and `false` otherwise.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::SameSite;
///
/// let lax = SameSite::Lax;
/// assert!(lax.is_lax());
/// assert!(!lax.is_strict());
/// assert!(!lax.is_none());
/// ```
#[inline]
pub fn is_lax(self) -> bool {
match self {
SameSite::Lax => true,
SameSite::Strict | SameSite::None => false,
}
}
/// Returns `true` if `self` is `SameSite::None` and `false` otherwise.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::SameSite;
///
/// let none = SameSite::None;
/// assert!(none.is_none());
/// assert!(!none.is_lax());
/// assert!(!none.is_strict());
/// ```
#[inline]
pub fn is_none(self) -> bool {
match self {
SameSite::None => true,
SameSite::Lax | SameSite::Strict => false,
}
}
}
impl fmt::Display for SameSite {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
SameSite::Strict => write!(f, "Strict"),
SameSite::Lax => write!(f, "Lax"),
SameSite::None => Ok(()),
}
}
}


@ -0,0 +1,655 @@
use std::collections::HashSet;
use std::mem::replace;
use time::{self, Duration};
use super::delta::DeltaCookie;
use super::Cookie;
#[cfg(feature = "secure-cookies")]
use super::secure::{Key, PrivateJar, SignedJar};
/// A collection of cookies that tracks its modifications.
///
/// A `CookieJar` provides storage for any number of cookies. Any changes made
/// to the jar are tracked; the changes can be retrieved via the
/// [delta](#method.delta) method, which returns an iterator over the changes.
///
/// # Usage
///
/// A jar's life begins via [new](#method.new) and calls to
/// [`add_original`](#method.add_original):
///
/// ```rust
/// use actix_http::cookie::{Cookie, CookieJar};
///
/// let mut jar = CookieJar::new();
/// jar.add_original(Cookie::new("name", "value"));
/// jar.add_original(Cookie::new("second", "another"));
/// ```
///
/// Cookies can be added via [add](#method.add) and removed via
/// [remove](#method.remove). Finally, cookies can be looked up via
/// [get](#method.get):
///
/// ```rust
/// # use actix_http::cookie::{Cookie, CookieJar};
/// let mut jar = CookieJar::new();
/// jar.add(Cookie::new("a", "one"));
/// jar.add(Cookie::new("b", "two"));
///
/// assert_eq!(jar.get("a").map(|c| c.value()), Some("one"));
/// assert_eq!(jar.get("b").map(|c| c.value()), Some("two"));
///
/// jar.remove(Cookie::named("b"));
/// assert!(jar.get("b").is_none());
/// ```
///
/// # Deltas
///
/// A jar keeps track of any modifications made to it over time. The
/// modifications are recorded as cookies. The modifications can be retrieved
/// via [delta](#method.delta). Any new `Cookie` added to a jar via `add`
/// results in the same `Cookie` appearing in the `delta`; cookies added via
/// `add_original` do not count towards the delta. Any _original_ cookie that is
/// removed from a jar results in a "removal" cookie appearing in the delta. A
/// "removal" cookie is a cookie that a server sends so that the cookie is
/// removed from the client's machine.
///
/// Deltas are typically used to create `Set-Cookie` headers corresponding to
/// the changes made to a cookie jar over a period of time.
///
/// ```rust
/// # use actix_http::cookie::{Cookie, CookieJar};
/// let mut jar = CookieJar::new();
///
/// // original cookies don't affect the delta
/// jar.add_original(Cookie::new("original", "value"));
/// assert_eq!(jar.delta().count(), 0);
///
/// // new cookies result in an equivalent `Cookie` in the delta
/// jar.add(Cookie::new("a", "one"));
/// jar.add(Cookie::new("b", "two"));
/// assert_eq!(jar.delta().count(), 2);
///
/// // removing an original cookie adds a "removal" cookie to the delta
/// jar.remove(Cookie::named("original"));
/// assert_eq!(jar.delta().count(), 3);
///
/// // removing a new cookie that was added removes that `Cookie` from the delta
/// jar.remove(Cookie::named("a"));
/// assert_eq!(jar.delta().count(), 2);
/// ```
#[derive(Default, Debug, Clone)]
pub struct CookieJar {
original_cookies: HashSet<DeltaCookie>,
delta_cookies: HashSet<DeltaCookie>,
}
impl CookieJar {
/// Creates an empty cookie jar.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::CookieJar;
///
/// let jar = CookieJar::new();
/// assert_eq!(jar.iter().count(), 0);
/// ```
pub fn new() -> CookieJar {
CookieJar::default()
}
/// Returns a reference to the `Cookie` inside this jar with the name
/// `name`. If no such cookie exists, returns `None`.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
///
/// let mut jar = CookieJar::new();
/// assert!(jar.get("name").is_none());
///
/// jar.add(Cookie::new("name", "value"));
/// assert_eq!(jar.get("name").map(|c| c.value()), Some("value"));
/// ```
pub fn get(&self, name: &str) -> Option<&Cookie<'static>> {
self.delta_cookies
.get(name)
.or_else(|| self.original_cookies.get(name))
.and_then(|c| if !c.removed { Some(&c.cookie) } else { None })
}
/// Adds an "original" `cookie` to this jar. If an original cookie with the
/// same name already exists, it is replaced with `cookie`. Cookies added
/// with `add` take precedence and are not replaced by this method.
///
/// Adding an original cookie does not affect the [delta](#method.delta)
/// computation. This method is intended to be used to seed the cookie jar
/// with cookies received from a client's HTTP message.
///
/// For accurate `delta` computations, this method should not be called
/// after calling `remove`.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
///
/// let mut jar = CookieJar::new();
/// jar.add_original(Cookie::new("name", "value"));
/// jar.add_original(Cookie::new("second", "two"));
///
/// assert_eq!(jar.get("name").map(|c| c.value()), Some("value"));
/// assert_eq!(jar.get("second").map(|c| c.value()), Some("two"));
/// assert_eq!(jar.iter().count(), 2);
/// assert_eq!(jar.delta().count(), 0);
/// ```
pub fn add_original(&mut self, cookie: Cookie<'static>) {
self.original_cookies.replace(DeltaCookie::added(cookie));
}
/// Adds `cookie` to this jar. If a cookie with the same name already
/// exists, it is replaced with `cookie`.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
///
/// let mut jar = CookieJar::new();
/// jar.add(Cookie::new("name", "value"));
/// jar.add(Cookie::new("second", "two"));
///
/// assert_eq!(jar.get("name").map(|c| c.value()), Some("value"));
/// assert_eq!(jar.get("second").map(|c| c.value()), Some("two"));
/// assert_eq!(jar.iter().count(), 2);
/// assert_eq!(jar.delta().count(), 2);
/// ```
pub fn add(&mut self, cookie: Cookie<'static>) {
self.delta_cookies.replace(DeltaCookie::added(cookie));
}
/// Removes `cookie` from this jar. If an _original_ cookie with the same
/// name as `cookie` is present in the jar, a _removal_ cookie will be
/// present in the `delta` computation. To properly generate the removal
/// cookie, `cookie` must contain the same `path` and `domain` as the cookie
/// that was initially set.
///
/// A "removal" cookie is a cookie that has the same name as the original
/// cookie but has an empty value, a max-age of 0, and an expiration date
/// far in the past.
///
/// # Example
///
/// Removing an _original_ cookie results in a _removal_ cookie:
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
/// use time::Duration;
///
/// # fn main() {
/// let mut jar = CookieJar::new();
///
/// // Assume this cookie originally had a path of "/" and domain of "a.b".
/// jar.add_original(Cookie::new("name", "value"));
///
/// // If the path and domain were set, they must be provided to `remove`.
/// jar.remove(Cookie::build("name", "").path("/").domain("a.b").finish());
///
/// // The delta will contain the removal cookie.
/// let delta: Vec<_> = jar.delta().collect();
/// assert_eq!(delta.len(), 1);
/// assert_eq!(delta[0].name(), "name");
/// assert_eq!(delta[0].max_age(), Some(Duration::seconds(0)));
/// # }
/// ```
///
/// Removing a new cookie does not result in a _removal_ cookie:
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
///
/// let mut jar = CookieJar::new();
/// jar.add(Cookie::new("name", "value"));
/// assert_eq!(jar.delta().count(), 1);
///
/// jar.remove(Cookie::named("name"));
/// assert_eq!(jar.delta().count(), 0);
/// ```
pub fn remove(&mut self, mut cookie: Cookie<'static>) {
if self.original_cookies.contains(cookie.name()) {
cookie.set_value("");
cookie.set_max_age(Duration::seconds(0));
cookie.set_expires(time::now() - Duration::days(365));
self.delta_cookies.replace(DeltaCookie::removed(cookie));
} else {
self.delta_cookies.remove(cookie.name());
}
}
/// Removes `cookie` from this jar completely. This method differs from
/// `remove` in that no delta cookie is created under any condition. Neither
/// the `delta` nor `iter` methods will return a cookie that is removed
/// using this method.
///
/// # Example
///
/// Removing an _original_ cookie; no _removal_ cookie is generated:
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
/// use time::Duration;
///
/// # fn main() {
/// let mut jar = CookieJar::new();
///
/// // Add an original cookie and a new cookie.
/// jar.add_original(Cookie::new("name", "value"));
/// jar.add(Cookie::new("key", "value"));
/// assert_eq!(jar.delta().count(), 1);
/// assert_eq!(jar.iter().count(), 2);
///
/// // Now force remove the original cookie.
/// jar.force_remove(Cookie::new("name", "value"));
/// assert_eq!(jar.delta().count(), 1);
/// assert_eq!(jar.iter().count(), 1);
///
/// // Now force remove the new cookie.
/// jar.force_remove(Cookie::new("key", "value"));
/// assert_eq!(jar.delta().count(), 0);
/// assert_eq!(jar.iter().count(), 0);
/// # }
/// ```
pub fn force_remove<'a>(&mut self, cookie: Cookie<'a>) {
self.original_cookies.remove(cookie.name());
self.delta_cookies.remove(cookie.name());
}
/// Removes all cookies from this cookie jar.
#[deprecated(
since = "0.7.0",
note = "calling this method may not remove \
all cookies since the path and domain are not specified; use \
`remove` instead"
)]
pub fn clear(&mut self) {
self.delta_cookies.clear();
for delta in replace(&mut self.original_cookies, HashSet::new()) {
self.remove(delta.cookie);
}
}
/// Returns an iterator over cookies that represent the changes to this jar
/// over time. These cookies can be rendered directly as `Set-Cookie` header
/// values to affect the changes made to this jar on the client.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
///
/// let mut jar = CookieJar::new();
/// jar.add_original(Cookie::new("name", "value"));
/// jar.add_original(Cookie::new("second", "two"));
///
/// // Add new cookies.
/// jar.add(Cookie::new("new", "third"));
/// jar.add(Cookie::new("another", "fourth"));
/// jar.add(Cookie::new("yac", "fifth"));
///
/// // Remove some cookies.
/// jar.remove(Cookie::named("name"));
/// jar.remove(Cookie::named("another"));
///
/// // Delta contains two new cookies ("new", "yac") and a removal ("name").
/// assert_eq!(jar.delta().count(), 3);
/// ```
pub fn delta(&self) -> Delta {
Delta {
iter: self.delta_cookies.iter(),
}
}
/// Returns an iterator over all of the cookies present in this jar.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
///
/// let mut jar = CookieJar::new();
///
/// jar.add_original(Cookie::new("name", "value"));
/// jar.add_original(Cookie::new("second", "two"));
///
/// jar.add(Cookie::new("new", "third"));
/// jar.add(Cookie::new("another", "fourth"));
/// jar.add(Cookie::new("yac", "fifth"));
///
/// jar.remove(Cookie::named("name"));
/// jar.remove(Cookie::named("another"));
///
/// // There are three cookies in the jar: "second", "new", and "yac".
/// # assert_eq!(jar.iter().count(), 3);
/// for cookie in jar.iter() {
/// match cookie.name() {
/// "second" => assert_eq!(cookie.value(), "two"),
/// "new" => assert_eq!(cookie.value(), "third"),
/// "yac" => assert_eq!(cookie.value(), "fifth"),
/// _ => unreachable!("there are only three cookies in the jar")
/// }
/// }
/// ```
pub fn iter(&self) -> Iter {
Iter {
delta_cookies: self
.delta_cookies
.iter()
.chain(self.original_cookies.difference(&self.delta_cookies)),
}
}
/// Returns a `PrivateJar` with `self` as its parent jar using the key `key`
/// to sign/encrypt and verify/decrypt cookies added/retrieved from the
/// child jar.
///
/// Any modifications to the child jar will be reflected on the parent jar,
/// and any retrievals from the child jar will be made from the parent jar.
///
/// This method is only available when the `secure` feature is enabled.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{Cookie, CookieJar, Key};
///
/// // Generate a secure key.
/// let key = Key::generate();
///
/// // Add a private (signed + encrypted) cookie.
/// let mut jar = CookieJar::new();
/// jar.private(&key).add(Cookie::new("private", "text"));
///
/// // The cookie's contents are encrypted.
/// assert_ne!(jar.get("private").unwrap().value(), "text");
///
/// // They can be decrypted and verified through the child jar.
/// assert_eq!(jar.private(&key).get("private").unwrap().value(), "text");
///
/// // A tampered with cookie does not validate but still exists.
/// let mut cookie = jar.get("private").unwrap().clone();
/// jar.add(Cookie::new("private", cookie.value().to_string() + "!"));
/// assert!(jar.private(&key).get("private").is_none());
/// assert!(jar.get("private").is_some());
/// ```
#[cfg(feature = "secure-cookies")]
pub fn private(&mut self, key: &Key) -> PrivateJar {
PrivateJar::new(self, key)
}
/// Returns a `SignedJar` with `self` as its parent jar using the key `key`
/// to sign/verify cookies added/retrieved from the child jar.
///
/// Any modifications to the child jar will be reflected on the parent jar,
/// and any retrievals from the child jar will be made from the parent jar.
///
/// This method is only available when the `secure` feature is enabled.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{Cookie, CookieJar, Key};
///
/// // Generate a secure key.
/// let key = Key::generate();
///
/// // Add a signed cookie.
/// let mut jar = CookieJar::new();
/// jar.signed(&key).add(Cookie::new("signed", "text"));
///
/// // The cookie's contents are signed but still in plaintext.
/// assert_ne!(jar.get("signed").unwrap().value(), "text");
/// assert!(jar.get("signed").unwrap().value().contains("text"));
///
/// // They can be verified through the child jar.
/// assert_eq!(jar.signed(&key).get("signed").unwrap().value(), "text");
///
/// // A tampered-with cookie does not validate but still exists.
/// let mut cookie = jar.get("signed").unwrap().clone();
/// jar.add(Cookie::new("signed", cookie.value().to_string() + "!"));
/// assert!(jar.signed(&key).get("signed").is_none());
/// assert!(jar.get("signed").is_some());
/// ```
#[cfg(feature = "secure-cookies")]
pub fn signed(&mut self, key: &Key) -> SignedJar {
SignedJar::new(self, key)
}
}
use std::collections::hash_set::Iter as HashSetIter;
/// Iterator over the changes to a cookie jar.
pub struct Delta<'a> {
iter: HashSetIter<'a, DeltaCookie>,
}
impl<'a> Iterator for Delta<'a> {
type Item = &'a Cookie<'static>;
fn next(&mut self) -> Option<&'a Cookie<'static>> {
self.iter.next().map(|c| &c.cookie)
}
}
use std::collections::hash_map::RandomState;
use std::collections::hash_set::Difference;
use std::iter::Chain;
/// Iterator over all of the cookies in a jar.
pub struct Iter<'a> {
delta_cookies:
Chain<HashSetIter<'a, DeltaCookie>, Difference<'a, DeltaCookie, RandomState>>,
}
impl<'a> Iterator for Iter<'a> {
type Item = &'a Cookie<'static>;
fn next(&mut self) -> Option<&'a Cookie<'static>> {
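// `delta_cookies` chains the delta set with the original cookies that are not
// shadowed by a delta entry; skip entries that are removal markers.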
for cookie in self.delta_cookies.by_ref() {
if !cookie.removed {
return Some(&*cookie);
}
}
None
}
}
#[cfg(test)]
mod test {
#[cfg(feature = "secure-cookies")]
use super::Key;
use super::{Cookie, CookieJar};
#[test]
#[allow(deprecated)]
fn simple() {
let mut c = CookieJar::new();
c.add(Cookie::new("test", ""));
c.add(Cookie::new("test2", ""));
c.remove(Cookie::named("test"));
assert!(c.get("test").is_none());
assert!(c.get("test2").is_some());
c.add(Cookie::new("test3", ""));
c.clear();
assert!(c.get("test").is_none());
assert!(c.get("test2").is_none());
assert!(c.get("test3").is_none());
}
#[test]
fn jar_is_send() {
fn is_send<T: Send>(_: T) -> bool {
true
}
assert!(is_send(CookieJar::new()))
}
#[test]
#[cfg(feature = "secure-cookies")]
fn iter() {
let key = Key::generate();
let mut c = CookieJar::new();
c.add_original(Cookie::new("original", "original"));
c.add(Cookie::new("test", "test"));
c.add(Cookie::new("test2", "test2"));
c.add(Cookie::new("test3", "test3"));
assert_eq!(c.iter().count(), 4);
c.signed(&key).add(Cookie::new("signed", "signed"));
c.private(&key).add(Cookie::new("encrypted", "encrypted"));
assert_eq!(c.iter().count(), 6);
c.remove(Cookie::named("test"));
assert_eq!(c.iter().count(), 5);
c.remove(Cookie::named("signed"));
c.remove(Cookie::named("test2"));
assert_eq!(c.iter().count(), 3);
c.add(Cookie::new("test2", "test2"));
assert_eq!(c.iter().count(), 4);
c.remove(Cookie::named("test2"));
assert_eq!(c.iter().count(), 3);
}
#[test]
#[cfg(feature = "secure-cookies")]
fn delta() {
use std::collections::HashMap;
use time::Duration;
let mut c = CookieJar::new();
c.add_original(Cookie::new("original", "original"));
c.add_original(Cookie::new("original1", "original1"));
c.add(Cookie::new("test", "test"));
c.add(Cookie::new("test2", "test2"));
c.add(Cookie::new("test3", "test3"));
c.add(Cookie::new("test4", "test4"));
c.remove(Cookie::named("test"));
c.remove(Cookie::named("original"));
assert_eq!(c.delta().count(), 4);
let names: HashMap<_, _> = c.delta().map(|c| (c.name(), c.max_age())).collect();
assert!(names.get("test2").unwrap().is_none());
assert!(names.get("test3").unwrap().is_none());
assert!(names.get("test4").unwrap().is_none());
assert_eq!(names.get("original").unwrap(), &Some(Duration::seconds(0)));
}
#[test]
fn replace_original() {
let mut jar = CookieJar::new();
jar.add_original(Cookie::new("original_a", "a"));
jar.add_original(Cookie::new("original_b", "b"));
assert_eq!(jar.get("original_a").unwrap().value(), "a");
jar.add(Cookie::new("original_a", "av2"));
assert_eq!(jar.get("original_a").unwrap().value(), "av2");
}
#[test]
fn empty_delta() {
let mut jar = CookieJar::new();
jar.add(Cookie::new("name", "val"));
assert_eq!(jar.delta().count(), 1);
jar.remove(Cookie::named("name"));
assert_eq!(jar.delta().count(), 0);
jar.add_original(Cookie::new("name", "val"));
assert_eq!(jar.delta().count(), 0);
jar.remove(Cookie::named("name"));
assert_eq!(jar.delta().count(), 1);
jar.add(Cookie::new("name", "val"));
assert_eq!(jar.delta().count(), 1);
jar.remove(Cookie::named("name"));
assert_eq!(jar.delta().count(), 1);
}
#[test]
fn add_remove_add() {
let mut jar = CookieJar::new();
jar.add_original(Cookie::new("name", "val"));
assert_eq!(jar.delta().count(), 0);
jar.remove(Cookie::named("name"));
assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
assert_eq!(jar.delta().count(), 1);
// The cookie's been deleted. Another original doesn't change that.
jar.add_original(Cookie::new("name", "val"));
assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
assert_eq!(jar.delta().count(), 1);
jar.remove(Cookie::named("name"));
assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
assert_eq!(jar.delta().count(), 1);
jar.add(Cookie::new("name", "val"));
assert_eq!(jar.delta().filter(|c| !c.value().is_empty()).count(), 1);
assert_eq!(jar.delta().count(), 1);
jar.remove(Cookie::named("name"));
assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
assert_eq!(jar.delta().count(), 1);
}
#[test]
fn replace_remove() {
let mut jar = CookieJar::new();
jar.add_original(Cookie::new("name", "val"));
assert_eq!(jar.delta().count(), 0);
jar.add(Cookie::new("name", "val"));
assert_eq!(jar.delta().count(), 1);
assert_eq!(jar.delta().filter(|c| !c.value().is_empty()).count(), 1);
jar.remove(Cookie::named("name"));
assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
}
#[test]
fn remove_with_path() {
let mut jar = CookieJar::new();
jar.add_original(Cookie::build("name", "val").finish());
assert_eq!(jar.iter().count(), 1);
assert_eq!(jar.delta().count(), 0);
assert_eq!(jar.iter().filter(|c| c.path().is_none()).count(), 1);
jar.remove(Cookie::build("name", "").path("/").finish());
assert_eq!(jar.iter().count(), 0);
assert_eq!(jar.delta().count(), 1);
assert_eq!(jar.delta().filter(|c| c.value().is_empty()).count(), 1);
assert_eq!(jar.delta().filter(|c| c.path() == Some("/")).count(), 1);
}
}

actix-http/src/cookie/mod.rs (1087 lines)

File diff suppressed because it is too large

@ -0,0 +1,426 @@
use std::borrow::Cow;
use std::cmp;
use std::convert::From;
use std::error::Error;
use std::fmt;
use std::str::Utf8Error;
use percent_encoding::percent_decode;
use time::{self, Duration};
use super::{Cookie, CookieStr, SameSite};
/// Enum corresponding to a parsing error.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum ParseError {
/// The cookie did not contain a name/value pair.
MissingPair,
/// The cookie's name was empty.
EmptyName,
/// Decoding the cookie's name or value resulted in invalid UTF-8.
Utf8Error(Utf8Error),
/// Exhaustively matching on this enum is discouraged: new variants may be
/// added without a breaking-change bump in version numbers.
#[doc(hidden)]
__Nonexhaustive,
}
impl ParseError {
/// Returns a description of this error as a string
pub fn as_str(&self) -> &'static str {
match *self {
ParseError::MissingPair => "the cookie is missing a name/value pair",
ParseError::EmptyName => "the cookie's name is empty",
ParseError::Utf8Error(_) => {
"decoding the cookie's name or value resulted in invalid UTF-8"
}
ParseError::__Nonexhaustive => unreachable!("__Nonexhaustive ParseError"),
}
}
}
impl fmt::Display for ParseError {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.as_str())
}
}
impl From<Utf8Error> for ParseError {
fn from(error: Utf8Error) -> ParseError {
ParseError::Utf8Error(error)
}
}
impl Error for ParseError {
fn description(&self) -> &str {
self.as_str()
}
}
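// Returns the (start, end) byte offsets of `needle` within `haystack`, or `None`
// if `needle` is not a subslice of `haystack`. Used to build indexed `CookieStr`s
// that borrow from the original cookie string.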
fn indexes_of(needle: &str, haystack: &str) -> Option<(usize, usize)> {
let haystack_start = haystack.as_ptr() as usize;
let needle_start = needle.as_ptr() as usize;
if needle_start < haystack_start {
return None;
}
if (needle_start + needle.len()) > (haystack_start + haystack.len()) {
return None;
}
let start = needle_start - haystack_start;
let end = start + needle.len();
Some((start, end))
}
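// Percent-decodes `name` and `val` into owned `CookieStr`s, failing with a
// `Utf8Error` if the decoded bytes are not valid UTF-8.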
fn name_val_decoded(
name: &str,
val: &str,
) -> Result<(CookieStr, CookieStr), ParseError> {
let decoded_name = percent_decode(name.as_bytes()).decode_utf8()?;
let decoded_value = percent_decode(val.as_bytes()).decode_utf8()?;
let name = CookieStr::Concrete(Cow::Owned(decoded_name.into_owned()));
let val = CookieStr::Concrete(Cow::Owned(decoded_value.into_owned()));
Ok((name, val))
}
// This function does the real parsing but _does not_ set the `cookie_string` in
// the returned cookie object. This only exists so that the borrow to `s` is
// returned at the end of the call, allowing the `cookie_string` field to be
// set in the outer `parse` function.
fn parse_inner<'c>(s: &str, decode: bool) -> Result<Cookie<'c>, ParseError> {
let mut attributes = s.split(';');
let key_value = match attributes.next() {
Some(s) => s,
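// `split` always yields at least one element, so this branch is unreachable.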
_ => panic!(),
};
// Determine the name = val.
let (name, value) = match key_value.find('=') {
Some(i) => (key_value[..i].trim(), key_value[(i + 1)..].trim()),
None => return Err(ParseError::MissingPair),
};
if name.is_empty() {
return Err(ParseError::EmptyName);
}
// Create a cookie with all of the defaults. We'll fill things in while we
// iterate through the parameters below.
let (name, value) = if decode {
name_val_decoded(name, value)?
} else {
let name_indexes = indexes_of(name, s).expect("name sub");
let value_indexes = indexes_of(value, s).expect("value sub");
let name = CookieStr::Indexed(name_indexes.0, name_indexes.1);
let value = CookieStr::Indexed(value_indexes.0, value_indexes.1);
(name, value)
};
let mut cookie = Cookie {
name,
value,
cookie_string: None,
expires: None,
max_age: None,
domain: None,
path: None,
secure: None,
http_only: None,
same_site: None,
};
for attr in attributes {
let (key, value) = match attr.find('=') {
Some(i) => (attr[..i].trim(), Some(attr[(i + 1)..].trim())),
None => (attr.trim(), None),
};
match (&*key.to_ascii_lowercase(), value) {
("secure", _) => cookie.secure = Some(true),
("httponly", _) => cookie.http_only = Some(true),
("max-age", Some(v)) => {
// See RFC 6265 Section 5.2.2, negative values indicate that the
// earliest possible expiration time should be used, so set the
// max age as 0 seconds.
cookie.max_age = match v.parse() {
Ok(val) if val <= 0 => Some(Duration::zero()),
Ok(val) => {
// Don't panic if the max age seconds is greater than what's supported by
// `Duration`.
let val = cmp::min(val, Duration::max_value().num_seconds());
Some(Duration::seconds(val))
}
Err(_) => continue,
};
}
("domain", Some(mut domain)) if !domain.is_empty() => {
if domain.starts_with('.') {
domain = &domain[1..];
}
let (i, j) = indexes_of(domain, s).expect("domain sub");
cookie.domain = Some(CookieStr::Indexed(i, j));
}
("path", Some(v)) => {
let (i, j) = indexes_of(v, s).expect("path sub");
cookie.path = Some(CookieStr::Indexed(i, j));
}
("samesite", Some(v)) => {
if v.eq_ignore_ascii_case("strict") {
cookie.same_site = Some(SameSite::Strict);
} else if v.eq_ignore_ascii_case("lax") {
cookie.same_site = Some(SameSite::Lax);
} else {
// We do nothing here, for now. When/if the `SameSite`
// attribute becomes standard, the spec says that we should
// ignore this cookie, i.e, fail to parse it, when an
// invalid value is passed in. The draft is at
// http://httpwg.org/http-extensions/draft-ietf-httpbis-cookie-same-site.html.
}
}
("expires", Some(v)) => {
// Try strptime with three date formats according to
// http://tools.ietf.org/html/rfc2616#section-3.3.1. Try
// additional ones as encountered in the real world.
let tm = time::strptime(v, "%a, %d %b %Y %H:%M:%S %Z")
.or_else(|_| time::strptime(v, "%A, %d-%b-%y %H:%M:%S %Z"))
.or_else(|_| time::strptime(v, "%a, %d-%b-%Y %H:%M:%S %Z"))
.or_else(|_| time::strptime(v, "%a %b %d %H:%M:%S %Y"));
if let Ok(time) = tm {
cookie.expires = Some(time)
}
}
_ => {
// We're going to be permissive here. If we have no idea what
// this is, then it's something nonstandard. We're not going to
// store it (because it's not compliant), but we're also not
// going to emit an error.
}
}
}
Ok(cookie)
}
pub fn parse_cookie<'c, S>(cow: S, decode: bool) -> Result<Cookie<'c>, ParseError>
where
S: Into<Cow<'c, str>>,
{
let s = cow.into();
let mut cookie = parse_inner(&s, decode)?;
cookie.cookie_string = Some(s);
Ok(cookie)
}
#[cfg(test)]
mod tests {
use super::{Cookie, SameSite};
use time::{strptime, Duration};
macro_rules! assert_eq_parse {
($string:expr, $expected:expr) => {
let cookie = match Cookie::parse($string) {
Ok(cookie) => cookie,
Err(e) => panic!("Failed to parse {:?}: {:?}", $string, e),
};
assert_eq!(cookie, $expected);
};
}
macro_rules! assert_ne_parse {
($string:expr, $expected:expr) => {
let cookie = match Cookie::parse($string) {
Ok(cookie) => cookie,
Err(e) => panic!("Failed to parse {:?}: {:?}", $string, e),
};
assert_ne!(cookie, $expected);
};
}
#[test]
fn parse_same_site() {
let expected = Cookie::build("foo", "bar")
.same_site(SameSite::Lax)
.finish();
assert_eq_parse!("foo=bar; SameSite=Lax", expected);
assert_eq_parse!("foo=bar; SameSite=lax", expected);
assert_eq_parse!("foo=bar; SameSite=LAX", expected);
assert_eq_parse!("foo=bar; samesite=Lax", expected);
assert_eq_parse!("foo=bar; SAMESITE=Lax", expected);
let expected = Cookie::build("foo", "bar")
.same_site(SameSite::Strict)
.finish();
assert_eq_parse!("foo=bar; SameSite=Strict", expected);
assert_eq_parse!("foo=bar; SameSITE=Strict", expected);
assert_eq_parse!("foo=bar; SameSite=strict", expected);
assert_eq_parse!("foo=bar; SameSite=STrICT", expected);
assert_eq_parse!("foo=bar; SameSite=STRICT", expected);
}
#[test]
fn parse() {
assert!(Cookie::parse("bar").is_err());
assert!(Cookie::parse("=bar").is_err());
assert!(Cookie::parse(" =bar").is_err());
assert!(Cookie::parse("foo=").is_ok());
let expected = Cookie::build("foo", "bar=baz").finish();
assert_eq_parse!("foo=bar=baz", expected);
let mut expected = Cookie::build("foo", "bar").finish();
assert_eq_parse!("foo=bar", expected);
assert_eq_parse!("foo = bar", expected);
assert_eq_parse!(" foo=bar ", expected);
assert_eq_parse!(" foo=bar ;Domain=", expected);
assert_eq_parse!(" foo=bar ;Domain= ", expected);
assert_eq_parse!(" foo=bar ;Ignored", expected);
let mut unexpected = Cookie::build("foo", "bar").http_only(false).finish();
assert_ne_parse!(" foo=bar ;HttpOnly", unexpected);
assert_ne_parse!(" foo=bar; httponly", unexpected);
expected.set_http_only(true);
assert_eq_parse!(" foo=bar ;HttpOnly", expected);
assert_eq_parse!(" foo=bar ;httponly", expected);
assert_eq_parse!(" foo=bar ;HTTPONLY=whatever", expected);
assert_eq_parse!(" foo=bar ; sekure; HTTPONLY", expected);
expected.set_secure(true);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure", expected);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure=aaaa", expected);
unexpected.set_http_only(true);
unexpected.set_secure(true);
assert_ne_parse!(" foo=bar ;HttpOnly; skeure", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly; =secure", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly;", unexpected);
unexpected.set_secure(false);
assert_ne_parse!(" foo=bar ;HttpOnly; secure", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly; secure", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly; secure", unexpected);
expected.set_max_age(Duration::zero());
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=0", expected);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 0 ", expected);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=-1", expected);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = -1 ", expected);
expected.set_max_age(Duration::minutes(1));
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=60", expected);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 60 ", expected);
expected.set_max_age(Duration::seconds(4));
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=4", expected);
assert_eq_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 4 ", expected);
unexpected.set_secure(true);
unexpected.set_max_age(Duration::minutes(1));
assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=122", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 38 ", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age=51", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = -1 ", unexpected);
assert_ne_parse!(" foo=bar ;HttpOnly; Secure; Max-Age = 0", unexpected);
expected.set_path("/");
assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4; Path=/", expected);
assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4;Path=/", expected);
expected.set_path("/foo");
assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4; Path=/foo", expected);
assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4;Path=/foo", expected);
assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4;path=/foo", expected);
assert_eq_parse!("foo=bar;HttpOnly; Secure; Max-Age=4;path = /foo", expected);
unexpected.set_max_age(Duration::seconds(4));
unexpected.set_path("/bar");
assert_ne_parse!("foo=bar;HttpOnly; Secure; Max-Age=4; Path=/foo", unexpected);
assert_ne_parse!("foo=bar;HttpOnly; Secure; Max-Age=4;Path=/baz", unexpected);
expected.set_domain("www.foo.com");
assert_eq_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=www.foo.com",
expected
);
expected.set_domain("foo.com");
assert_eq_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=foo.com",
expected
);
assert_eq_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=FOO.COM",
expected
);
unexpected.set_path("/foo");
unexpected.set_domain("bar.com");
assert_ne_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=foo.com",
unexpected
);
assert_ne_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=FOO.COM",
unexpected
);
let time_str = "Wed, 21 Oct 2015 07:28:00 GMT";
let expires = strptime(time_str, "%a, %d %b %Y %H:%M:%S %Z").unwrap();
expected.set_expires(expires);
assert_eq_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=foo.com; Expires=Wed, 21 Oct 2015 07:28:00 GMT",
expected
);
unexpected.set_domain("foo.com");
let bad_expires = strptime(time_str, "%a, %d %b %Y %H:%S:%M %Z").unwrap();
expected.set_expires(bad_expires);
assert_ne_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
Domain=foo.com; Expires=Wed, 21 Oct 2015 07:28:00 GMT",
unexpected
);
}
#[test]
fn odd_characters() {
let expected = Cookie::new("foo", "b%2Fr");
assert_eq_parse!("foo=b%2Fr", expected);
}
#[test]
fn odd_characters_encoded() {
let expected = Cookie::new("foo", "b/r");
let cookie = match Cookie::parse_encoded("foo=b%2Fr") {
Ok(cookie) => cookie,
Err(e) => panic!("Failed to parse: {:?}", e),
};
assert_eq!(cookie, expected);
}
#[test]
fn do_not_panic_on_large_max_ages() {
let max_seconds = Duration::max_value().num_seconds();
let expected = Cookie::build("foo", "bar")
.max_age(Duration::seconds(max_seconds))
.finish();
assert_eq_parse!(format!(" foo=bar; Max-Age={:?}", max_seconds + 1), expected);
}
}


@ -0,0 +1,180 @@
use ring::digest::{Algorithm, SHA256};
use ring::hkdf::expand;
use ring::hmac::SigningKey;
use ring::rand::{SecureRandom, SystemRandom};
use super::private::KEY_LEN as PRIVATE_KEY_LEN;
use super::signed::KEY_LEN as SIGNED_KEY_LEN;
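// HKDF parameters used by `from_master` to expand a master key into the
// signing and encryption sub-keys.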
static HKDF_DIGEST: &'static Algorithm = &SHA256;
const KEYS_INFO: &'static str = "COOKIE;SIGNED:HMAC-SHA256;PRIVATE:AEAD-AES-256-GCM";
/// A cryptographic master key for use with `Signed` and/or `Private` jars.
///
/// This structure encapsulates secure, cryptographic keys for use with both
/// [PrivateJar](struct.PrivateJar.html) and [SignedJar](struct.SignedJar.html).
/// It can be derived from a single master key via
/// [from_master](#method.from_master) or generated from a secure random source
/// via [generate](#method.generate). A single instance of `Key` can be used for
/// both a `PrivateJar` and a `SignedJar`.
///
/// This type is only available when the `secure-cookies` feature is enabled.
#[derive(Clone)]
pub struct Key {
signing_key: [u8; SIGNED_KEY_LEN],
encryption_key: [u8; PRIVATE_KEY_LEN],
}
impl Key {
/// Derives new signing/encryption keys from a master key.
///
/// The master key must be at least 256-bits (32 bytes). For security, the
/// master key _must_ be cryptographically random. The keys are derived
/// deterministically from the master key.
///
/// # Panics
///
/// Panics if `key` is less than 32 bytes in length.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Key;
///
/// # /*
/// let master_key = { /* a cryptographically random key >= 32 bytes */ };
/// # */
/// # let master_key: &Vec<u8> = &(0..32).collect();
///
/// let key = Key::from_master(master_key);
/// ```
pub fn from_master(key: &[u8]) -> Key {
if key.len() < 32 {
panic!(
"bad master key length: expected at least 32 bytes, found {}",
key.len()
);
}
// Expand the user's key into two.
let prk = SigningKey::new(HKDF_DIGEST, key);
let mut both_keys = [0; SIGNED_KEY_LEN + PRIVATE_KEY_LEN];
expand(&prk, KEYS_INFO.as_bytes(), &mut both_keys);
// Copy the keys into their respective arrays.
let mut signing_key = [0; SIGNED_KEY_LEN];
let mut encryption_key = [0; PRIVATE_KEY_LEN];
signing_key.copy_from_slice(&both_keys[..SIGNED_KEY_LEN]);
encryption_key.copy_from_slice(&both_keys[SIGNED_KEY_LEN..]);
Key {
signing_key,
encryption_key,
}
}
/// Generates signing/encryption keys from a secure, random source. Keys are
/// generated nondeterministically.
///
/// # Panics
///
/// Panics if randomness cannot be retrieved from the operating system. See
/// [try_generate](#method.try_generate) for a non-panicking version.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Key;
///
/// let key = Key::generate();
/// ```
pub fn generate() -> Key {
Self::try_generate().expect("failed to generate `Key` from randomness")
}
/// Attempts to generate signing/encryption keys from a secure, random
/// source. Keys are generated nondeterministically. If randomness cannot be
/// retrieved from the underlying operating system, returns `None`.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Key;
///
/// let key = Key::try_generate();
/// ```
pub fn try_generate() -> Option<Key> {
let mut sign_key = [0; SIGNED_KEY_LEN];
let mut enc_key = [0; PRIVATE_KEY_LEN];
let rng = SystemRandom::new();
if rng.fill(&mut sign_key).is_err() || rng.fill(&mut enc_key).is_err() {
return None;
}
Some(Key {
signing_key: sign_key,
encryption_key: enc_key,
})
}
/// Returns the raw bytes of a key suitable for signing cookies.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Key;
///
/// let key = Key::generate();
/// let signing_key = key.signing();
/// ```
pub fn signing(&self) -> &[u8] {
&self.signing_key[..]
}
/// Returns the raw bytes of a key suitable for encrypting cookies.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::Key;
///
/// let key = Key::generate();
/// let encryption_key = key.encryption();
/// ```
pub fn encryption(&self) -> &[u8] {
&self.encryption_key[..]
}
}
#[cfg(test)]
mod test {
use super::Key;
#[test]
fn deterministic_from_master() {
let master_key: Vec<u8> = (0..32).collect();
let key_a = Key::from_master(&master_key);
let key_b = Key::from_master(&master_key);
assert_eq!(key_a.signing(), key_b.signing());
assert_eq!(key_a.encryption(), key_b.encryption());
assert_ne!(key_a.encryption(), key_a.signing());
let master_key_2: Vec<u8> = (32..64).collect();
let key_2 = Key::from_master(&master_key_2);
assert_ne!(key_2.signing(), key_a.signing());
assert_ne!(key_2.encryption(), key_a.encryption());
}
#[test]
fn non_deterministic_generate() {
let key_a = Key::generate();
let key_b = Key::generate();
assert_ne!(key_a.signing(), key_b.signing());
assert_ne!(key_a.encryption(), key_b.encryption());
}
}


@ -0,0 +1,40 @@
#[cfg(test)]
macro_rules! assert_simple_behaviour {
($clear:expr, $secure:expr) => {{
assert_eq!($clear.iter().count(), 0);
$secure.add(Cookie::new("name", "val"));
assert_eq!($clear.iter().count(), 1);
assert_eq!($secure.get("name").unwrap().value(), "val");
assert_ne!($clear.get("name").unwrap().value(), "val");
$secure.add(Cookie::new("another", "two"));
assert_eq!($clear.iter().count(), 2);
$clear.remove(Cookie::named("another"));
assert_eq!($clear.iter().count(), 1);
$secure.remove(Cookie::named("name"));
assert_eq!($clear.iter().count(), 0);
}};
}
#[cfg(test)]
macro_rules! assert_secure_behaviour {
($clear:expr, $secure:expr) => {{
$secure.add(Cookie::new("secure", "secure"));
assert!($clear.get("secure").unwrap().value() != "secure");
assert!($secure.get("secure").unwrap().value() == "secure");
let mut cookie = $clear.get("secure").unwrap().clone();
let new_val = format!("{}l", cookie.value());
cookie.set_value(new_val);
$clear.add(cookie);
assert!($secure.get("secure").is_none());
let mut cookie = $clear.get("secure").unwrap().clone();
cookie.set_value("foobar");
$clear.add(cookie);
assert!($secure.get("secure").is_none());
}};
}


@ -0,0 +1,10 @@
//! Fork of https://github.com/alexcrichton/cookie-rs
#[macro_use]
mod macros;
mod key;
mod private;
mod signed;
pub use self::key::*;
pub use self::private::*;
pub use self::signed::*;


@ -0,0 +1,269 @@
use std::str;
use log::warn;
use ring::aead::{open_in_place, seal_in_place, Aad, Algorithm, Nonce, AES_256_GCM};
use ring::aead::{OpeningKey, SealingKey};
use ring::rand::{SecureRandom, SystemRandom};
use super::Key;
use crate::cookie::{Cookie, CookieJar};
// Keep these in sync, and keep the key len synced with the `private` docs as
// well as the `KEYS_INFO` const in secure::Key.
static ALGO: &'static Algorithm = &AES_256_GCM;
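// AES-256-GCM uses a 96-bit (12-byte) nonce; it is prepended to every sealed
// value before base64 encoding.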
const NONCE_LEN: usize = 12;
pub const KEY_LEN: usize = 32;
/// A child cookie jar that provides authenticated encryption for its cookies.
///
/// A _private_ child jar signs and encrypts all the cookies added to it and
/// verifies and decrypts cookies retrieved from it. Any cookies stored in a
/// `PrivateJar` are simultaneously assured confidentiality, integrity, and
/// authenticity. In other words, clients cannot discover nor tamper with the
/// contents of a cookie, nor can they fabricate cookie data.
///
/// This type is only available when the `secure-cookies` feature is enabled.
pub struct PrivateJar<'a> {
parent: &'a mut CookieJar,
key: [u8; KEY_LEN],
}
impl<'a> PrivateJar<'a> {
/// Creates a new child `PrivateJar` with parent `parent` and key `key`.
/// This method is typically called indirectly via the `private` method of
/// `CookieJar`.
#[doc(hidden)]
pub fn new(parent: &'a mut CookieJar, key: &Key) -> PrivateJar<'a> {
let mut key_array = [0u8; KEY_LEN];
key_array.copy_from_slice(key.encryption());
PrivateJar {
parent,
key: key_array,
}
}
/// Given a sealed value `str` and a key name `name`, where the nonce is
/// prepended to the original value and then both are Base64 encoded,
/// verifies and decrypts the sealed value and returns it. If there's a
/// problem, returns an `Err` with a string describing the issue.
fn unseal(&self, name: &str, value: &str) -> Result<String, &'static str> {
let mut data = base64::decode(value).map_err(|_| "bad base64 value")?;
if data.len() <= NONCE_LEN {
return Err("length of decoded data is <= NONCE_LEN");
}
let ad = Aad::from(name.as_bytes());
let key = OpeningKey::new(ALGO, &self.key).expect("opening key");
let (nonce, sealed) = data.split_at_mut(NONCE_LEN);
let nonce =
Nonce::try_assume_unique_for_key(nonce).expect("invalid length of `nonce`");
let unsealed = open_in_place(&key, nonce, ad, 0, sealed)
.map_err(|_| "invalid key/nonce/value: bad seal")?;
if let Ok(unsealed_utf8) = str::from_utf8(unsealed) {
Ok(unsealed_utf8.to_string())
} else {
warn!(
"Private cookie does not have utf8 content!
It is likely the secret key used to encrypt them has been leaked.
Please change it as soon as possible."
);
Err("bad unsealed utf8")
}
}
/// Returns a reference to the `Cookie` inside this jar with the name `name`
/// and authenticates and decrypts the cookie's value, returning a `Cookie`
/// with the decrypted value. If the cookie cannot be found, or the cookie
/// fails to authenticate or decrypt, `None` is returned.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// let mut private_jar = jar.private(&key);
/// assert!(private_jar.get("name").is_none());
///
/// private_jar.add(Cookie::new("name", "value"));
/// assert_eq!(private_jar.get("name").unwrap().value(), "value");
/// ```
pub fn get(&self, name: &str) -> Option<Cookie<'static>> {
if let Some(cookie_ref) = self.parent.get(name) {
let mut cookie = cookie_ref.clone();
if let Ok(value) = self.unseal(name, cookie.value()) {
cookie.set_value(value);
return Some(cookie);
}
}
None
}
/// Adds `cookie` to the parent jar. The cookie's value is encrypted with
/// authenticated encryption assuring confidentiality, integrity, and
/// authenticity.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// jar.private(&key).add(Cookie::new("name", "value"));
///
/// assert_ne!(jar.get("name").unwrap().value(), "value");
/// assert_eq!(jar.private(&key).get("name").unwrap().value(), "value");
/// ```
pub fn add(&mut self, mut cookie: Cookie<'static>) {
self.encrypt_cookie(&mut cookie);
// Add the sealed cookie to the parent.
self.parent.add(cookie);
}
/// Adds an "original" `cookie` to parent jar. The cookie's value is
/// encrypted with authenticated encryption assuring confidentiality,
/// integrity, and authenticity. Adding an original cookie does not affect
/// the [`CookieJar::delta()`](struct.CookieJar.html#method.delta)
/// computation. This method is intended to be used to seed the cookie jar
/// with cookies received from a client's HTTP message.
///
/// For accurate `delta` computations, this method should not be called
/// after calling `remove`.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// jar.private(&key).add_original(Cookie::new("name", "value"));
///
/// assert_eq!(jar.iter().count(), 1);
/// assert_eq!(jar.delta().count(), 0);
/// ```
pub fn add_original(&mut self, mut cookie: Cookie<'static>) {
self.encrypt_cookie(&mut cookie);
// Add the sealed cookie to the parent.
self.parent.add_original(cookie);
}
/// Encrypts the cookie's value with
/// authenticated encryption assuring confidentiality, integrity, and authenticity.
fn encrypt_cookie(&self, cookie: &mut Cookie) {
let name = cookie.name().as_bytes();
let value = cookie.value().as_bytes();
let data = encrypt_name_value(name, value, &self.key);
// Base64 encode the nonce and encrypted value.
let sealed_value = base64::encode(&data);
cookie.set_value(sealed_value);
}
/// Removes `cookie` from the parent jar.
///
/// For correct removal, the passed in `cookie` must contain the same `path`
/// and `domain` as the cookie that was initially set.
///
/// See [CookieJar::remove](struct.CookieJar.html#method.remove) for more
/// details.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// let mut private_jar = jar.private(&key);
///
/// private_jar.add(Cookie::new("name", "value"));
/// assert!(private_jar.get("name").is_some());
///
/// private_jar.remove(Cookie::named("name"));
/// assert!(private_jar.get("name").is_none());
/// ```
pub fn remove(&mut self, cookie: Cookie<'static>) {
self.parent.remove(cookie);
}
}
fn encrypt_name_value(name: &[u8], value: &[u8], key: &[u8]) -> Vec<u8> {
// Create the `SealingKey` structure.
let key = SealingKey::new(ALGO, key).expect("sealing key creation");
// Create a vec to hold the [nonce | cookie value | overhead].
let overhead = ALGO.tag_len();
let mut data = vec![0; NONCE_LEN + value.len() + overhead];
// Randomly generate the nonce, then copy the cookie value as input.
let (nonce, in_out) = data.split_at_mut(NONCE_LEN);
SystemRandom::new()
.fill(nonce)
.expect("couldn't random fill nonce");
in_out[..value.len()].copy_from_slice(value);
let nonce =
Nonce::try_assume_unique_for_key(nonce).expect("invalid length of `nonce`");
// Use cookie's name as associated data to prevent value swapping.
let ad = Aad::from(name);
// Perform the actual sealing operation and get the output length.
let output_len =
seal_in_place(&key, nonce, ad, in_out, overhead).expect("in-place seal");
// Remove the overhead and return the sealed content.
data.truncate(NONCE_LEN + output_len);
data
}
#[cfg(test)]
mod test {
use super::{encrypt_name_value, Cookie, CookieJar, Key};
#[test]
fn simple() {
let key = Key::generate();
let mut jar = CookieJar::new();
assert_simple_behaviour!(jar, jar.private(&key));
}
#[test]
fn private() {
let key = Key::generate();
let mut jar = CookieJar::new();
assert_secure_behaviour!(jar, jar.private(&key));
}
#[test]
fn non_utf8() {
let key = Key::generate();
let mut jar = CookieJar::new();
let name = "malicious";
let mut assert_non_utf8 = |value: &[u8]| {
let sealed = encrypt_name_value(name.as_bytes(), value, &key.encryption());
let encoded = base64::encode(&sealed);
assert_eq!(
jar.private(&key).unseal(name, &encoded),
Err("bad unsealed utf8")
);
jar.add(Cookie::new(name, encoded));
assert_eq!(jar.private(&key).get(name), None);
};
assert_non_utf8(&[0x72, 0xfb, 0xdf, 0x74]); // "rûßt" in ISO/IEC 8859-1, not valid UTF-8
let mut malicious =
String::from(r#"{"id":"abc123??%X","admin":true}"#).into_bytes();
malicious[8] |= 0b1100_0000;
malicious[9] |= 0b1100_0000;
assert_non_utf8(&malicious);
}
}


@ -0,0 +1,185 @@
use ring::digest::{Algorithm, SHA256};
use ring::hmac::{sign, verify_with_own_key as verify, SigningKey};
use super::Key;
use crate::cookie::{Cookie, CookieJar};
// Keep these in sync, and keep the key len synced with the `signed` docs as
// well as the `KEYS_INFO` const in secure::Key.
static HMAC_DIGEST: &'static Algorithm = &SHA256;
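// Length of a base64-encoded SHA-256 HMAC digest (32 bytes -> 44 characters).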
const BASE64_DIGEST_LEN: usize = 44;
pub const KEY_LEN: usize = 32;
/// A child cookie jar that authenticates its cookies.
///
/// A _signed_ child jar signs all the cookies added to it and verifies cookies
/// retrieved from it. Any cookies stored in a `SignedJar` are assured integrity
/// and authenticity. In other words, clients cannot tamper with the contents of
/// a cookie nor can they fabricate cookie values, but the data is visible in
/// plaintext.
///
/// This type is only available when the `secure-cookies` feature is enabled.
pub struct SignedJar<'a> {
parent: &'a mut CookieJar,
key: SigningKey,
}
impl<'a> SignedJar<'a> {
/// Creates a new child `SignedJar` with parent `parent` and key `key`. This
/// method is typically called indirectly via the `signed` method of
/// `CookieJar`.
#[doc(hidden)]
pub fn new(parent: &'a mut CookieJar, key: &Key) -> SignedJar<'a> {
SignedJar {
parent,
key: SigningKey::new(HMAC_DIGEST, key.signing()),
}
}
/// Given a signed value `str` where the signature is prepended to `value`,
/// verifies the signed value and returns it. If there's a problem, returns
/// an `Err` with a string describing the issue.
fn verify(&self, cookie_value: &str) -> Result<String, &'static str> {
if cookie_value.len() < BASE64_DIGEST_LEN {
return Err("length of value is <= BASE64_DIGEST_LEN");
}
let (digest_str, value) = cookie_value.split_at(BASE64_DIGEST_LEN);
let sig = base64::decode(digest_str).map_err(|_| "bad base64 digest")?;
verify(&self.key, value.as_bytes(), &sig)
.map(|_| value.to_string())
.map_err(|_| "value did not verify")
}
/// Returns a reference to the `Cookie` inside this jar with the name `name`
/// and verifies the authenticity and integrity of the cookie's value,
/// returning a `Cookie` with the authenticated value. If the cookie cannot
/// be found, or the cookie fails to verify, `None` is returned.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// let mut signed_jar = jar.signed(&key);
/// assert!(signed_jar.get("name").is_none());
///
/// signed_jar.add(Cookie::new("name", "value"));
/// assert_eq!(signed_jar.get("name").unwrap().value(), "value");
/// ```
pub fn get(&self, name: &str) -> Option<Cookie<'static>> {
if let Some(cookie_ref) = self.parent.get(name) {
let mut cookie = cookie_ref.clone();
if let Ok(value) = self.verify(cookie.value()) {
cookie.set_value(value);
return Some(cookie);
}
}
None
}
/// Adds `cookie` to the parent jar. The cookie's value is signed assuring
/// integrity and authenticity.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// jar.signed(&key).add(Cookie::new("name", "value"));
///
/// assert_ne!(jar.get("name").unwrap().value(), "value");
/// assert!(jar.get("name").unwrap().value().contains("value"));
/// assert_eq!(jar.signed(&key).get("name").unwrap().value(), "value");
/// ```
pub fn add(&mut self, mut cookie: Cookie<'static>) {
self.sign_cookie(&mut cookie);
self.parent.add(cookie);
}
/// Adds an "original" `cookie` to this jar. The cookie's value is signed
/// assuring integrity and authenticity. Adding an original cookie does not
/// affect the [`CookieJar::delta()`](struct.CookieJar.html#method.delta)
/// computation. This method is intended to be used to seed the cookie jar
/// with cookies received from a client's HTTP message.
///
/// For accurate `delta` computations, this method should not be called
/// after calling `remove`.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// jar.signed(&key).add_original(Cookie::new("name", "value"));
///
/// assert_eq!(jar.iter().count(), 1);
/// assert_eq!(jar.delta().count(), 0);
/// ```
pub fn add_original(&mut self, mut cookie: Cookie<'static>) {
self.sign_cookie(&mut cookie);
self.parent.add_original(cookie);
}
/// Signs the cookie's value assuring integrity and authenticity.
fn sign_cookie(&self, cookie: &mut Cookie) {
let digest = sign(&self.key, cookie.value().as_bytes());
let mut new_value = base64::encode(digest.as_ref());
new_value.push_str(cookie.value());
cookie.set_value(new_value);
}
/// Removes `cookie` from the parent jar.
///
/// For correct removal, the passed in `cookie` must contain the same `path`
/// and `domain` as the cookie that was initially set.
///
/// See [CookieJar::remove](struct.CookieJar.html#method.remove) for more
/// details.
///
/// # Example
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie, Key};
///
/// let key = Key::generate();
/// let mut jar = CookieJar::new();
/// let mut signed_jar = jar.signed(&key);
///
/// signed_jar.add(Cookie::new("name", "value"));
/// assert!(signed_jar.get("name").is_some());
///
/// signed_jar.remove(Cookie::named("name"));
/// assert!(signed_jar.get("name").is_none());
/// ```
pub fn remove(&mut self, cookie: Cookie<'static>) {
self.parent.remove(cookie);
}
}
#[cfg(test)]
mod test {
use super::{Cookie, CookieJar, Key};
#[test]
fn simple() {
let key = Key::generate();
let mut jar = CookieJar::new();
assert_simple_behaviour!(jar, jar.signed(&key));
}
#[test]
fn private() {
let key = Key::generate();
let mut jar = CookieJar::new();
assert_secure_behaviour!(jar, jar.signed(&key));
}
}


@ -0,0 +1,227 @@
use std::io::{self, Write};
use actix_threadpool::{run, CpuFuture};
#[cfg(feature = "brotli")]
use brotli2::write::BrotliDecoder;
use bytes::Bytes;
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
use flate2::write::{GzDecoder, ZlibDecoder};
use futures::{try_ready, Async, Future, Poll, Stream};
use super::Writer;
use crate::error::PayloadError;
use crate::http::header::{ContentEncoding, HeaderMap, CONTENT_ENCODING};
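// Payload chunks smaller than this are decompressed inline on the current
// task; larger chunks are handed off to the blocking thread pool.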
const INPLACE: usize = 2049;
pub struct Decoder<S> {
decoder: Option<ContentDecoder>,
stream: S,
eof: bool,
fut: Option<CpuFuture<(Option<Bytes>, ContentDecoder), io::Error>>,
}
impl<S> Decoder<S>
where
S: Stream<Item = Bytes, Error = PayloadError>,
{
/// Construct a decoder.
#[inline]
pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> {
let decoder = match encoding {
#[cfg(feature = "brotli")]
ContentEncoding::Br => Some(ContentDecoder::Br(Box::new(
BrotliDecoder::new(Writer::new()),
))),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
ZlibDecoder::new(Writer::new()),
))),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(
GzDecoder::new(Writer::new()),
))),
_ => None,
};
Decoder {
decoder,
stream,
fut: None,
eof: false,
}
}
/// Construct decoder based on headers.
#[inline]
pub fn from_headers(stream: S, headers: &HeaderMap) -> Decoder<S> {
// check content-encoding
let encoding = if let Some(enc) = headers.get(&CONTENT_ENCODING) {
if let Ok(enc) = enc.to_str() {
ContentEncoding::from(enc)
} else {
ContentEncoding::Identity
}
} else {
ContentEncoding::Identity
};
Self::new(stream, encoding)
}
}
impl<S> Stream for Decoder<S>
where
S: Stream<Item = Bytes, Error = PayloadError>,
{
type Item = Bytes;
type Error = PayloadError;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
loop {
if let Some(ref mut fut) = self.fut {
let (chunk, decoder) = try_ready!(fut.poll());
self.decoder = Some(decoder);
self.fut.take();
if let Some(chunk) = chunk {
return Ok(Async::Ready(Some(chunk)));
}
}
if self.eof {
return Ok(Async::Ready(None));
}
match self.stream.poll()? {
Async::Ready(Some(chunk)) => {
if let Some(mut decoder) = self.decoder.take() {
if chunk.len() < INPLACE {
let chunk = decoder.feed_data(chunk)?;
self.decoder = Some(decoder);
if let Some(chunk) = chunk {
return Ok(Async::Ready(Some(chunk)));
}
} else {
self.fut = Some(run(move || {
let chunk = decoder.feed_data(chunk)?;
Ok((chunk, decoder))
}));
}
continue;
} else {
return Ok(Async::Ready(Some(chunk)));
}
}
Async::Ready(None) => {
self.eof = true;
return if let Some(mut decoder) = self.decoder.take() {
Ok(Async::Ready(decoder.feed_eof()?))
} else {
Ok(Async::Ready(None))
};
}
Async::NotReady => break,
}
}
Ok(Async::NotReady)
}
}
enum ContentDecoder {
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
Deflate(Box<ZlibDecoder<Writer>>),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
Gzip(Box<GzDecoder<Writer>>),
#[cfg(feature = "brotli")]
Br(Box<BrotliDecoder<Writer>>),
}
impl ContentDecoder {
#[allow(unreachable_patterns)]
fn feed_eof(&mut self) -> io::Result<Option<Bytes>> {
match self {
#[cfg(feature = "brotli")]
ContentDecoder::Br(ref mut decoder) => match decoder.finish() {
Ok(mut writer) => {
let b = writer.take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() {
Ok(_) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() {
Ok(_) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
_ => Ok(None),
}
}
#[allow(unreachable_patterns)]
fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> {
match self {
#[cfg(feature = "brotli")]
ContentDecoder::Br(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentDecoder::Deflate(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
_ => Ok(Some(data)),
}
}
}


@ -0,0 +1,257 @@
//! Stream encoder
use std::io::{self, Write};
use actix_threadpool::{run, CpuFuture};
#[cfg(feature = "brotli")]
use brotli2::write::BrotliEncoder;
use bytes::Bytes;
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
use flate2::write::{GzEncoder, ZlibEncoder};
use futures::{Async, Future, Poll};
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use crate::http::header::{ContentEncoding, CONTENT_ENCODING};
use crate::http::{HeaderValue, HttpTryFrom, StatusCode};
use crate::{Error, ResponseHead};
use super::Writer;
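// Body chunks smaller than this are compressed inline; larger chunks are
// handed off to the blocking thread pool.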
const INPLACE: usize = 2049;
pub struct Encoder<B> {
eof: bool,
body: EncoderBody<B>,
encoder: Option<ContentEncoder>,
fut: Option<CpuFuture<ContentEncoder, io::Error>>,
}
impl<B: MessageBody> Encoder<B> {
pub fn response(
encoding: ContentEncoding,
head: &mut ResponseHead,
body: ResponseBody<B>,
) -> ResponseBody<Encoder<B>> {
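// Skip encoding when a Content-Encoding header is already present, for
// protocol switches (101), or when the requested encoding is Identity/Auto.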
let can_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
|| head.status == StatusCode::SWITCHING_PROTOCOLS
|| encoding == ContentEncoding::Identity
|| encoding == ContentEncoding::Auto);
let body = match body {
ResponseBody::Other(b) => match b {
Body::None => return ResponseBody::Other(Body::None),
Body::Empty => return ResponseBody::Other(Body::Empty),
Body::Bytes(buf) => {
if can_encode {
EncoderBody::Bytes(buf)
} else {
return ResponseBody::Other(Body::Bytes(buf));
}
}
Body::Message(stream) => EncoderBody::BoxedStream(stream),
},
ResponseBody::Body(stream) => EncoderBody::Stream(stream),
};
if can_encode {
update_head(encoding, head);
head.no_chunking(false);
ResponseBody::Body(Encoder {
body,
eof: false,
fut: None,
encoder: ContentEncoder::encoder(encoding),
})
} else {
ResponseBody::Body(Encoder {
body,
eof: false,
fut: None,
encoder: None,
})
}
}
}
enum EncoderBody<B> {
Bytes(Bytes),
Stream(B),
BoxedStream(Box<dyn MessageBody>),
}
impl<B: MessageBody> MessageBody for Encoder<B> {
fn size(&self) -> BodySize {
if self.encoder.is_none() {
match self.body {
EncoderBody::Bytes(ref b) => b.size(),
EncoderBody::Stream(ref b) => b.size(),
EncoderBody::BoxedStream(ref b) => b.size(),
}
} else {
BodySize::Stream
}
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
loop {
if self.eof {
return Ok(Async::Ready(None));
}
if let Some(ref mut fut) = self.fut {
let mut encoder = futures::try_ready!(fut.poll());
let chunk = encoder.take();
self.encoder = Some(encoder);
self.fut.take();
if !chunk.is_empty() {
return Ok(Async::Ready(Some(chunk)));
}
}
let result = match self.body {
EncoderBody::Bytes(ref mut b) => {
if b.is_empty() {
Async::Ready(None)
} else {
Async::Ready(Some(std::mem::replace(b, Bytes::new())))
}
}
EncoderBody::Stream(ref mut b) => b.poll_next()?,
EncoderBody::BoxedStream(ref mut b) => b.poll_next()?,
};
match result {
Async::NotReady => return Ok(Async::NotReady),
Async::Ready(Some(chunk)) => {
if let Some(mut encoder) = self.encoder.take() {
if chunk.len() < INPLACE {
encoder.write(&chunk)?;
let chunk = encoder.take();
self.encoder = Some(encoder);
if !chunk.is_empty() {
return Ok(Async::Ready(Some(chunk)));
}
} else {
self.fut = Some(run(move || {
encoder.write(&chunk)?;
Ok(encoder)
}));
}
} else {
return Ok(Async::Ready(Some(chunk)));
}
}
Async::Ready(None) => {
if let Some(encoder) = self.encoder.take() {
let chunk = encoder.finish()?;
if chunk.is_empty() {
return Ok(Async::Ready(None));
} else {
self.eof = true;
return Ok(Async::Ready(Some(chunk)));
}
} else {
return Ok(Async::Ready(None));
}
}
}
}
}
}
fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
head.headers_mut().insert(
CONTENT_ENCODING,
HeaderValue::try_from(Bytes::from_static(encoding.as_str().as_bytes())).unwrap(),
);
}
enum ContentEncoder {
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
Deflate(ZlibEncoder<Writer>),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
Gzip(GzEncoder<Writer>),
#[cfg(feature = "brotli")]
Br(BrotliEncoder<Writer>),
}
impl ContentEncoder {
fn encoder(encoding: ContentEncoding) -> Option<Self> {
match encoding {
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoding::Deflate => Some(ContentEncoder::Deflate(ZlibEncoder::new(
Writer::new(),
flate2::Compression::fast(),
))),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoding::Gzip => Some(ContentEncoder::Gzip(GzEncoder::new(
Writer::new(),
flate2::Compression::fast(),
))),
#[cfg(feature = "brotli")]
ContentEncoding::Br => {
Some(ContentEncoder::Br(BrotliEncoder::new(Writer::new(), 3)))
}
_ => None,
}
}
#[inline]
pub(crate) fn take(&mut self) -> Bytes {
match *self {
#[cfg(feature = "brotli")]
ContentEncoder::Br(ref mut encoder) => encoder.get_mut().take(),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(),
}
}
fn finish(self) -> Result<Bytes, io::Error> {
match self {
#[cfg(feature = "brotli")]
ContentEncoder::Br(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Gzip(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Deflate(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
}
}
fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
match *self {
#[cfg(feature = "brotli")]
ContentEncoder::Br(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding br encoding: {}", err);
Err(err)
}
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding gzip encoding: {}", err);
Err(err)
}
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Deflate(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding deflate encoding: {}", err);
Err(err)
}
},
}
}
}


@ -0,0 +1,35 @@
//! Content-Encoding support
use std::io;
use bytes::{Bytes, BytesMut};
mod decoder;
mod encoder;
pub use self::decoder::Decoder;
pub use self::encoder::Encoder;
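// In-memory `io::Write` sink shared by the encoders and decoders: written
// bytes accumulate in `buf` and are drained as `Bytes` via `take()`.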
pub(self) struct Writer {
buf: BytesMut,
}
impl Writer {
fn new() -> Writer {
Writer {
buf: BytesMut::with_capacity(8192),
}
}
fn take(&mut self) -> Bytes {
self.buf.take().freeze()
}
}
impl io::Write for Writer {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.buf.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}

actix-http/src/error.rs (1166 lines)

File diff suppressed because it is too large

@ -0,0 +1,91 @@
use std::any::{Any, TypeId};
use std::fmt;
use hashbrown::HashMap;
#[derive(Default)]
/// A type map of request extensions.
pub struct Extensions {
map: HashMap<TypeId, Box<Any>>,
}
impl Extensions {
/// Create an empty `Extensions`.
#[inline]
pub fn new() -> Extensions {
Extensions {
map: HashMap::default(),
}
}
/// Insert a type into this `Extensions`.
///
/// If an extension of this type already existed, it is replaced.
pub fn insert<T: 'static>(&mut self, val: T) {
self.map.insert(TypeId::of::<T>(), Box::new(val));
}
/// Check if the container has an entry of this type.
pub fn contains<T: 'static>(&self) -> bool {
self.map.get(&TypeId::of::<T>()).is_some()
}
/// Get a reference to a type previously inserted on this `Extensions`.
pub fn get<T: 'static>(&self) -> Option<&T> {
self.map
.get(&TypeId::of::<T>())
.and_then(|boxed| (&**boxed as &(Any + 'static)).downcast_ref())
}
/// Get a mutable reference to a type previously inserted on this `Extensions`.
pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
self.map
.get_mut(&TypeId::of::<T>())
.and_then(|boxed| (&mut **boxed as &mut (Any + 'static)).downcast_mut())
}
/// Remove a type from this `Extensions`.
///
/// If a extension of this type existed, it will be returned.
pub fn remove<T: 'static>(&mut self) -> Option<T> {
self.map.remove(&TypeId::of::<T>()).and_then(|boxed| {
(boxed as Box<Any + 'static>)
.downcast()
.ok()
.map(|boxed| *boxed)
})
}
/// Clear the `Extensions` of all inserted extensions.
#[inline]
pub fn clear(&mut self) {
self.map.clear();
}
}
impl fmt::Debug for Extensions {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Extensions").finish()
}
}
#[test]
fn test_extensions() {
#[derive(Debug, PartialEq)]
struct MyType(i32);
let mut extensions = Extensions::new();
extensions.insert(5i32);
extensions.insert(MyType(10));
assert_eq!(extensions.get(), Some(&5i32));
assert_eq!(extensions.get_mut(), Some(&mut 5i32));
assert_eq!(extensions.remove::<i32>(), Some(5i32));
assert!(extensions.get::<i32>().is_none());
assert_eq!(extensions.get::<bool>(), None);
assert_eq!(extensions.get(), Some(&MyType(10)));
}

actix-http/src/h1/client.rs (245 lines)

@ -0,0 +1,245 @@
#![allow(unused_imports, unused_variables, dead_code)]
use std::io::{self, Write};
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags;
use bytes::{BufMut, Bytes, BytesMut};
use http::header::{
HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING, UPGRADE,
};
use http::{Method, Version};
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
use super::{decoder, encoder, reserve_readbuf};
use super::{Message, MessageType};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::error::{ParseError, PayloadError};
use crate::helpers;
use crate::message::{ConnectionType, Head, MessagePool, RequestHead, ResponseHead};
bitflags! {
struct Flags: u8 {
const HEAD = 0b0000_0001;
const KEEPALIVE_ENABLED = 0b0000_1000;
const STREAM = 0b0001_0000;
}
}
const AVERAGE_HEADER_SIZE: usize = 30;
/// HTTP/1 Codec
pub struct ClientCodec {
inner: ClientCodecInner,
}
/// HTTP/1 Payload Codec
pub struct ClientPayloadCodec {
inner: ClientCodecInner,
}
struct ClientCodecInner {
config: ServiceConfig,
decoder: decoder::MessageDecoder<ResponseHead>,
payload: Option<PayloadDecoder>,
version: Version,
ctype: ConnectionType,
// encoder part
flags: Flags,
headers_size: u32,
encoder: encoder::MessageEncoder<RequestHead>,
}
impl Default for ClientCodec {
fn default() -> Self {
ClientCodec::new(ServiceConfig::default())
}
}
impl ClientCodec {
/// Create HTTP/1 codec.
///
/// Whether keep-alive is enabled in `config` determines how the `Connection` header is generated.
pub fn new(config: ServiceConfig) -> Self {
let flags = if config.keep_alive_enabled() {
Flags::KEEPALIVE_ENABLED
} else {
Flags::empty()
};
ClientCodec {
inner: ClientCodecInner {
config,
decoder: decoder::MessageDecoder::default(),
payload: None,
version: Version::HTTP_11,
ctype: ConnectionType::Close,
flags,
headers_size: 0,
encoder: encoder::MessageEncoder::default(),
},
}
}
/// Check if request is upgrade
pub fn upgrade(&self) -> bool {
self.inner.ctype == ConnectionType::Upgrade
}
/// Check if last response is keep-alive
pub fn keepalive(&self) -> bool {
self.inner.ctype == ConnectionType::KeepAlive
}
/// Check last request's message type
pub fn message_type(&self) -> MessageType {
if self.inner.flags.contains(Flags::STREAM) {
MessageType::Stream
} else if self.inner.payload.is_none() {
MessageType::None
} else {
MessageType::Payload
}
}
/// Convert message codec to a payload codec
pub fn into_payload_codec(self) -> ClientPayloadCodec {
ClientPayloadCodec { inner: self.inner }
}
}
impl ClientPayloadCodec {
/// Check if last response is keep-alive
pub fn keepalive(&self) -> bool {
self.inner.ctype == ConnectionType::KeepAlive
}
/// Transform payload codec to a message codec
pub fn into_message_codec(self) -> ClientCodec {
ClientCodec { inner: self.inner }
}
}
impl Decoder for ClientCodec {
type Item = ResponseHead;
type Error = ParseError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
debug_assert!(!self.inner.payload.is_some(), "Payload decoder is set");
if let Some((req, payload)) = self.inner.decoder.decode(src)? {
if let Some(ctype) = req.ctype() {
// do not use peer's keep-alive
self.inner.ctype = if ctype == ConnectionType::KeepAlive {
self.inner.ctype
} else {
ctype
};
}
if !self.inner.flags.contains(Flags::HEAD) {
match payload {
PayloadType::None => self.inner.payload = None,
PayloadType::Payload(pl) => self.inner.payload = Some(pl),
PayloadType::Stream(pl) => {
self.inner.payload = Some(pl);
self.inner.flags.insert(Flags::STREAM);
}
}
} else {
self.inner.payload = None;
}
reserve_readbuf(src);
Ok(Some(req))
} else {
Ok(None)
}
}
}
impl Decoder for ClientPayloadCodec {
type Item = Option<Bytes>;
type Error = PayloadError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
debug_assert!(
self.inner.payload.is_some(),
"Payload decoder is not specified"
);
Ok(match self.inner.payload.as_mut().unwrap().decode(src)? {
Some(PayloadItem::Chunk(chunk)) => {
reserve_readbuf(src);
Some(Some(chunk))
}
Some(PayloadItem::Eof) => {
self.inner.payload.take();
Some(None)
}
None => None,
})
}
}
impl Encoder for ClientCodec {
type Item = Message<(RequestHead, BodySize)>;
type Error = io::Error;
fn encode(
&mut self,
item: Self::Item,
dst: &mut BytesMut,
) -> Result<(), Self::Error> {
match item {
Message::Item((mut msg, length)) => {
let inner = &mut self.inner;
inner.version = msg.version;
inner.flags.set(Flags::HEAD, msg.method == Method::HEAD);
// connection status
inner.ctype = match msg.connection_type() {
ConnectionType::KeepAlive => {
if inner.flags.contains(Flags::KEEPALIVE_ENABLED) {
ConnectionType::KeepAlive
} else {
ConnectionType::Close
}
}
ConnectionType::Upgrade => ConnectionType::Upgrade,
ConnectionType::Close => ConnectionType::Close,
};
inner.encoder.encode(
dst,
&mut msg,
false,
false,
inner.version,
length,
inner.ctype,
&inner.config,
)?;
}
Message::Chunk(Some(bytes)) => {
self.inner.encoder.encode_chunk(bytes.as_ref(), dst)?;
}
Message::Chunk(None) => {
self.inner.encoder.encode_eof(dst)?;
}
}
Ok(())
}
}
pub struct Writer<'a>(pub &'a mut BytesMut);
impl<'a> io::Write for Writer<'a> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
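A minimal decode sketch follows (not part of this commit; it relies on the imports already at the top of this file, and the buffer contents are assumed): `ClientCodec` first yields the `ResponseHead`, and `into_payload_codec()` then switches to `ClientPayloadCodec` to drain body chunks from the same read buffer.

// Sketch only: `buf` is assumed to already hold a complete HTTP/1.1 response.
fn read_response(mut buf: BytesMut) {
    let mut codec = ClientCodec::default();
    if let Ok(Some(head)) = codec.decode(&mut buf) {
        println!("status: {:?}", head.status);
        if codec.message_type() != MessageType::None {
            // switch to the payload codec and drain body chunks
            let mut payload = codec.into_payload_codec();
            while let Ok(Some(Some(chunk))) = payload.decode(&mut buf) {
                println!("chunk: {} bytes", chunk.len());
            }
        }
    }
}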

actix-http/src/h1/codec.rs (new file)
@@ -0,0 +1,248 @@
#![allow(unused_imports, unused_variables, dead_code)]
use std::io::Write;
use std::{fmt, io, net};
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags;
use bytes::{BufMut, Bytes, BytesMut};
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
use http::{Method, StatusCode, Version};
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
use super::{decoder, encoder};
use super::{Message, MessageType};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::error::ParseError;
use crate::helpers;
use crate::message::{ConnectionType, Head, ResponseHead};
use crate::request::Request;
use crate::response::Response;
bitflags! {
struct Flags: u8 {
const HEAD = 0b0000_0001;
const KEEPALIVE_ENABLED = 0b0000_0010;
const STREAM = 0b0000_0100;
}
}
const AVERAGE_HEADER_SIZE: usize = 30;
/// HTTP/1 Codec
pub struct Codec {
pub(crate) config: ServiceConfig,
decoder: decoder::MessageDecoder<Request>,
payload: Option<PayloadDecoder>,
version: Version,
ctype: ConnectionType,
// encoder part
flags: Flags,
encoder: encoder::MessageEncoder<Response<()>>,
}
impl Default for Codec {
fn default() -> Self {
Codec::new(ServiceConfig::default())
}
}
impl fmt::Debug for Codec {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "h1::Codec({:?})", self.flags)
}
}
impl Codec {
/// Create HTTP/1 codec.
///
/// The config's keep-alive setting controls how the `connection` header is generated.
pub fn new(config: ServiceConfig) -> Self {
let flags = if config.keep_alive_enabled() {
Flags::KEEPALIVE_ENABLED
} else {
Flags::empty()
};
Codec {
config,
flags,
decoder: decoder::MessageDecoder::default(),
payload: None,
version: Version::HTTP_11,
ctype: ConnectionType::Close,
encoder: encoder::MessageEncoder::default(),
}
}
#[inline]
/// Check if request is upgrade
pub fn upgrade(&self) -> bool {
self.ctype == ConnectionType::Upgrade
}
#[inline]
/// Check if last response is keep-alive
pub fn keepalive(&self) -> bool {
self.ctype == ConnectionType::KeepAlive
}
#[inline]
/// Check if keep-alive enabled on server level
pub fn keepalive_enabled(&self) -> bool {
self.flags.contains(Flags::KEEPALIVE_ENABLED)
}
#[inline]
/// Check last request's message type
pub fn message_type(&self) -> MessageType {
if self.flags.contains(Flags::STREAM) {
MessageType::Stream
} else if self.payload.is_none() {
MessageType::None
} else {
MessageType::Payload
}
}
}
impl Decoder for Codec {
type Item = Message<Request>;
type Error = ParseError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if self.payload.is_some() {
Ok(match self.payload.as_mut().unwrap().decode(src)? {
Some(PayloadItem::Chunk(chunk)) => Some(Message::Chunk(Some(chunk))),
Some(PayloadItem::Eof) => {
self.payload.take();
Some(Message::Chunk(None))
}
None => None,
})
} else if let Some((req, payload)) = self.decoder.decode(src)? {
let head = req.head();
self.flags.set(Flags::HEAD, head.method == Method::HEAD);
self.version = head.version;
self.ctype = head.connection_type();
if self.ctype == ConnectionType::KeepAlive
&& !self.flags.contains(Flags::KEEPALIVE_ENABLED)
{
self.ctype = ConnectionType::Close
}
match payload {
PayloadType::None => self.payload = None,
PayloadType::Payload(pl) => self.payload = Some(pl),
PayloadType::Stream(pl) => {
self.payload = Some(pl);
self.flags.insert(Flags::STREAM);
}
}
Ok(Some(Message::Item(req)))
} else {
Ok(None)
}
}
}
impl Encoder for Codec {
type Item = Message<(Response<()>, BodySize)>;
type Error = io::Error;
fn encode(
&mut self,
item: Self::Item,
dst: &mut BytesMut,
) -> Result<(), Self::Error> {
match item {
Message::Item((mut res, length)) => {
// set response version
res.head_mut().version = self.version;
// connection status
self.ctype = if let Some(ct) = res.head().ctype() {
if ct == ConnectionType::KeepAlive {
self.ctype
} else {
ct
}
} else {
self.ctype
};
// encode message
let len = dst.len();
self.encoder.encode(
dst,
&mut res,
self.flags.contains(Flags::HEAD),
self.flags.contains(Flags::STREAM),
self.version,
length,
self.ctype,
&self.config,
)?;
// self.headers_size = (dst.len() - len) as u32;
}
Message::Chunk(Some(bytes)) => {
self.encoder.encode_chunk(bytes.as_ref(), dst)?;
}
Message::Chunk(None) => {
self.encoder.encode_eof(dst)?;
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use std::{cmp, io};
use actix_codec::{AsyncRead, AsyncWrite};
use bytes::{Buf, Bytes, BytesMut};
use http::{Method, Version};
use super::*;
use crate::error::ParseError;
use crate::h1::Message;
use crate::httpmessage::HttpMessage;
use crate::request::Request;
#[test]
fn test_http_request_chunked_payload_and_next_message() {
let mut codec = Codec::default();
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let item = codec.decode(&mut buf).unwrap().unwrap();
let req = item.message();
assert_eq!(req.method(), Method::GET);
assert!(req.chunked().unwrap());
buf.extend(
b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n\
POST /test2 HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n"
.iter(),
);
let msg = codec.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
let msg = codec.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"line");
let msg = codec.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
// decode next message
let item = codec.decode(&mut buf).unwrap().unwrap();
let req = item.message();
assert_eq!(*req.method(), Method::POST);
assert!(req.chunked().unwrap());
}
}
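The encoding direction can be sketched in the same test style (not part of this commit; it relies on the imports already at the top of this file and crate-internal visibility): a `Response<()>` plus its `BodySize` is passed as `Message::Item`, and the codec writes the status line, headers and framing into the write buffer.

#[cfg(test)]
#[test]
fn test_encode_empty_response_sketch() {
    let mut codec = Codec::default();
    let mut buf = BytesMut::new();
    let res: Response<()> = Response::Ok().finish().drop_body();
    codec
        .encode(Message::Item((res, BodySize::Empty)), &mut buf)
        .unwrap();
    // an empty body is announced with an explicit zero content-length
    assert!(std::str::from_utf8(&buf).unwrap().contains("content-length: 0"));
}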

actix-http/src/h1/decoder.rs (new file, 1194 lines; diff suppressed because it is too large)

actix-http/src/h1/dispatcher.rs (new file)
@@ -0,0 +1,854 @@
use std::collections::VecDeque;
use std::time::Instant;
use std::{fmt, io, net};
use actix_codec::{Decoder, Encoder, Framed, FramedParts};
use actix_server_config::IoStream;
use actix_service::Service;
use actix_utils::cloneable::CloneableService;
use bitflags::bitflags;
use bytes::{BufMut, BytesMut};
use futures::{Async, Future, Poll};
use log::{error, trace};
use tokio_timer::Delay;
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use crate::config::ServiceConfig;
use crate::error::{DispatchError, Error};
use crate::error::{ParseError, PayloadError};
use crate::request::Request;
use crate::response::Response;
use super::codec::Codec;
use super::payload::{Payload, PayloadSender, PayloadStatus};
use super::{Message, MessageType};
const LW_BUFFER_SIZE: usize = 4096;
const HW_BUFFER_SIZE: usize = 32_768;
const MAX_PIPELINED_MESSAGES: usize = 16;
bitflags! {
pub struct Flags: u8 {
const STARTED = 0b0000_0001;
const KEEPALIVE = 0b0000_0010;
const POLLED = 0b0000_0100;
const SHUTDOWN = 0b0000_1000;
const READ_DISCONNECT = 0b0001_0000;
const WRITE_DISCONNECT = 0b0010_0000;
const UPGRADE = 0b0100_0000;
}
}
/// Dispatcher for HTTP/1.1 protocol
pub struct Dispatcher<T, S, B, X, U>
where
S: Service<Request = Request>,
S::Error: Into<Error>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
inner: DispatcherState<T, S, B, X, U>,
}
enum DispatcherState<T, S, B, X, U>
where
S: Service<Request = Request>,
S::Error: Into<Error>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
Normal(InnerDispatcher<T, S, B, X, U>),
Upgrade(U::Future),
None,
}
struct InnerDispatcher<T, S, B, X, U>
where
S: Service<Request = Request>,
S::Error: Into<Error>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
service: CloneableService<S>,
expect: CloneableService<X>,
upgrade: Option<CloneableService<U>>,
flags: Flags,
peer_addr: Option<net::SocketAddr>,
error: Option<DispatchError>,
state: State<S, B, X>,
payload: Option<PayloadSender>,
messages: VecDeque<DispatcherMessage>,
ka_expire: Instant,
ka_timer: Option<Delay>,
io: T,
read_buf: BytesMut,
write_buf: BytesMut,
codec: Codec,
}
enum DispatcherMessage {
Item(Request),
Upgrade(Request),
Error(Response<()>),
}
enum State<S, B, X>
where
S: Service<Request = Request>,
X: Service<Request = Request, Response = Request>,
B: MessageBody,
{
None,
ExpectCall(X::Future),
ServiceCall(S::Future),
SendPayload(ResponseBody<B>),
}
impl<S, B, X> State<S, B, X>
where
S: Service<Request = Request>,
X: Service<Request = Request, Response = Request>,
B: MessageBody,
{
fn is_empty(&self) -> bool {
if let State::None = self {
true
} else {
false
}
}
fn is_call(&self) -> bool {
if let State::ServiceCall(_) = self {
true
} else {
false
}
}
}
enum PollResponse {
Upgrade(Request),
DoNothing,
DrainWriteBuf,
}
impl PartialEq for PollResponse {
fn eq(&self, other: &PollResponse) -> bool {
match self {
PollResponse::DrainWriteBuf => match other {
PollResponse::DrainWriteBuf => true,
_ => false,
},
PollResponse::DoNothing => match other {
PollResponse::DoNothing => true,
_ => false,
},
_ => false,
}
}
}
impl<T, S, B, X, U> Dispatcher<T, S, B, X, U>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
/// Create http/1 dispatcher.
pub fn new(
stream: T,
config: ServiceConfig,
service: CloneableService<S>,
expect: CloneableService<X>,
upgrade: Option<CloneableService<U>>,
) -> Self {
Dispatcher::with_timeout(
stream,
Codec::new(config.clone()),
config,
BytesMut::with_capacity(HW_BUFFER_SIZE),
None,
service,
expect,
upgrade,
)
}
/// Create http/1 dispatcher with slow request timeout.
pub fn with_timeout(
io: T,
codec: Codec,
config: ServiceConfig,
read_buf: BytesMut,
timeout: Option<Delay>,
service: CloneableService<S>,
expect: CloneableService<X>,
upgrade: Option<CloneableService<U>>,
) -> Self {
let keepalive = config.keep_alive_enabled();
let flags = if keepalive {
Flags::KEEPALIVE
} else {
Flags::empty()
};
// keep-alive timer
let (ka_expire, ka_timer) = if let Some(delay) = timeout {
(delay.deadline(), Some(delay))
} else if let Some(delay) = config.keep_alive_timer() {
(delay.deadline(), Some(delay))
} else {
(config.now(), None)
};
Dispatcher {
inner: DispatcherState::Normal(InnerDispatcher {
write_buf: BytesMut::with_capacity(HW_BUFFER_SIZE),
payload: None,
state: State::None,
error: None,
peer_addr: io.peer_addr(),
messages: VecDeque::new(),
io,
codec,
read_buf,
service,
expect,
upgrade,
flags,
ka_expire,
ka_timer,
}),
}
}
}
impl<T, S, B, X, U> InnerDispatcher<T, S, B, X, U>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
fn can_read(&self) -> bool {
if self
.flags
.intersects(Flags::READ_DISCONNECT | Flags::UPGRADE)
{
false
} else if let Some(ref info) = self.payload {
info.need_read() == PayloadStatus::Read
} else {
true
}
}
// mark both the read and write halves as disconnected and fail any pending payload
fn client_disconnected(&mut self) {
self.flags
.insert(Flags::READ_DISCONNECT | Flags::WRITE_DISCONNECT);
if let Some(mut payload) = self.payload.take() {
payload.set_error(PayloadError::Incomplete(None));
}
}
/// Flush stream
///
/// true - got WouldBlock
/// false - did not get WouldBlock
fn poll_flush(&mut self) -> Result<bool, DispatchError> {
if self.write_buf.is_empty() {
return Ok(false);
}
let len = self.write_buf.len();
let mut written = 0;
while written < len {
match self.io.write(&self.write_buf[written..]) {
Ok(0) => {
return Err(DispatchError::Io(io::Error::new(
io::ErrorKind::WriteZero,
"",
)));
}
Ok(n) => {
written += n;
}
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => {
if written > 0 {
let _ = self.write_buf.split_to(written);
}
return Ok(true);
}
Err(err) => return Err(DispatchError::Io(err)),
}
}
if written > 0 {
if written == self.write_buf.len() {
unsafe { self.write_buf.set_len(0) }
} else {
let _ = self.write_buf.split_to(written);
}
}
Ok(false)
}
fn send_response(
&mut self,
message: Response<()>,
body: ResponseBody<B>,
) -> Result<State<S, B, X>, DispatchError> {
self.codec
.encode(Message::Item((message, body.size())), &mut self.write_buf)
.map_err(|err| {
if let Some(mut payload) = self.payload.take() {
payload.set_error(PayloadError::Incomplete(None));
}
DispatchError::Io(err)
})?;
self.flags.set(Flags::KEEPALIVE, self.codec.keepalive());
match body.size() {
BodySize::None | BodySize::Empty => Ok(State::None),
_ => Ok(State::SendPayload(body)),
}
}
fn send_continue(&mut self) {
self.write_buf
.extend_from_slice(b"HTTP/1.1 100 Continue\r\n\r\n");
}
fn poll_response(&mut self) -> Result<PollResponse, DispatchError> {
loop {
let state = match self.state {
State::None => match self.messages.pop_front() {
Some(DispatcherMessage::Item(req)) => {
Some(self.handle_request(req)?)
}
Some(DispatcherMessage::Error(res)) => {
Some(self.send_response(res, ResponseBody::Other(Body::Empty))?)
}
Some(DispatcherMessage::Upgrade(req)) => {
return Ok(PollResponse::Upgrade(req));
}
None => None,
},
State::ExpectCall(ref mut fut) => match fut.poll() {
Ok(Async::Ready(req)) => {
self.send_continue();
self.state = State::ServiceCall(self.service.call(req));
continue;
}
Ok(Async::NotReady) => None,
Err(e) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
Some(self.send_response(res, body.into_body())?)
}
},
State::ServiceCall(ref mut fut) => match fut.poll() {
Ok(Async::Ready(res)) => {
let (res, body) = res.into().replace_body(());
self.state = self.send_response(res, body)?;
continue;
}
Ok(Async::NotReady) => None,
Err(e) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
Some(self.send_response(res, body.into_body())?)
}
},
State::SendPayload(ref mut stream) => {
loop {
if self.write_buf.len() < HW_BUFFER_SIZE {
match stream
.poll_next()
.map_err(|_| DispatchError::Unknown)?
{
Async::Ready(Some(item)) => {
self.codec.encode(
Message::Chunk(Some(item)),
&mut self.write_buf,
)?;
continue;
}
Async::Ready(None) => {
self.codec.encode(
Message::Chunk(None),
&mut self.write_buf,
)?;
self.state = State::None;
}
Async::NotReady => return Ok(PollResponse::DoNothing),
}
} else {
return Ok(PollResponse::DrainWriteBuf);
}
break;
}
continue;
}
};
// set new state
if let Some(state) = state {
self.state = state;
if !self.state.is_empty() {
continue;
}
} else {
// if read backpressure is enabled and we consumed some data,
// we may read more data and retry
if self.state.is_call() {
if self.poll_request()? {
continue;
}
} else if !self.messages.is_empty() {
continue;
}
}
break;
}
Ok(PollResponse::DoNothing)
}
fn handle_request(&mut self, req: Request) -> Result<State<S, B, X>, DispatchError> {
// Handle `EXPECT: 100-Continue` header
let req = if req.head().expect() {
let mut task = self.expect.call(req);
match task.poll() {
Ok(Async::Ready(req)) => {
self.send_continue();
req
}
Ok(Async::NotReady) => return Ok(State::ExpectCall(task)),
Err(e) => {
let e = e.into();
let res: Response = e.into();
let (res, body) = res.replace_body(());
return self.send_response(res, body.into_body());
}
}
} else {
req
};
// Call service
let mut task = self.service.call(req);
match task.poll() {
Ok(Async::Ready(res)) => {
let (res, body) = res.into().replace_body(());
self.send_response(res, body)
}
Ok(Async::NotReady) => Ok(State::ServiceCall(task)),
Err(e) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
self.send_response(res, body.into_body())
}
}
}
/// Process incoming requests
pub(self) fn poll_request(&mut self) -> Result<bool, DispatchError> {
// limit the number of unprocessed (pipelined) requests
if self.messages.len() >= MAX_PIPELINED_MESSAGES || !self.can_read() {
return Ok(false);
}
let mut updated = false;
loop {
match self.codec.decode(&mut self.read_buf) {
Ok(Some(msg)) => {
updated = true;
self.flags.insert(Flags::STARTED);
match msg {
Message::Item(mut req) => {
let pl = self.codec.message_type();
req.head_mut().peer_addr = self.peer_addr;
if pl == MessageType::Stream && self.upgrade.is_some() {
self.messages.push_back(DispatcherMessage::Upgrade(req));
break;
}
if pl == MessageType::Payload || pl == MessageType::Stream {
let (ps, pl) = Payload::create(false);
let (req1, _) =
req.replace_payload(crate::Payload::H1(pl));
req = req1;
self.payload = Some(ps);
}
// handle request early
if self.state.is_empty() {
self.state = self.handle_request(req)?;
} else {
self.messages.push_back(DispatcherMessage::Item(req));
}
}
Message::Chunk(Some(chunk)) => {
if let Some(ref mut payload) = self.payload {
payload.feed_data(chunk);
} else {
error!(
"Internal server error: unexpected payload chunk"
);
self.flags.insert(Flags::READ_DISCONNECT);
self.messages.push_back(DispatcherMessage::Error(
Response::InternalServerError().finish().drop_body(),
));
self.error = Some(DispatchError::InternalError);
break;
}
}
Message::Chunk(None) => {
if let Some(mut payload) = self.payload.take() {
payload.feed_eof();
} else {
error!("Internal server error: unexpected eof");
self.flags.insert(Flags::READ_DISCONNECT);
self.messages.push_back(DispatcherMessage::Error(
Response::InternalServerError().finish().drop_body(),
));
self.error = Some(DispatchError::InternalError);
break;
}
}
}
}
Ok(None) => break,
Err(ParseError::Io(e)) => {
self.client_disconnected();
self.error = Some(DispatchError::Io(e));
break;
}
Err(e) => {
if let Some(mut payload) = self.payload.take() {
payload.set_error(PayloadError::EncodingCorrupted);
}
// Malformed requests are answered with a 400 response
self.messages.push_back(DispatcherMessage::Error(
Response::BadRequest().finish().drop_body(),
));
self.flags.insert(Flags::READ_DISCONNECT);
self.error = Some(e.into());
break;
}
}
}
if updated && self.ka_timer.is_some() {
if let Some(expire) = self.codec.config.keep_alive_expire() {
self.ka_expire = expire;
}
}
Ok(updated)
}
/// keep-alive timer
fn poll_keepalive(&mut self) -> Result<(), DispatchError> {
if self.ka_timer.is_none() {
// shutdown timeout
if self.flags.contains(Flags::SHUTDOWN) {
if let Some(interval) = self.codec.config.client_disconnect_timer() {
self.ka_timer = Some(Delay::new(interval));
} else {
self.flags.insert(Flags::READ_DISCONNECT);
return Ok(());
}
} else {
return Ok(());
}
}
match self.ka_timer.as_mut().unwrap().poll().map_err(|e| {
error!("Timer error {:?}", e);
DispatchError::Unknown
})? {
Async::Ready(_) => {
// if we get timeout during shutdown, drop connection
if self.flags.contains(Flags::SHUTDOWN) {
return Err(DispatchError::DisconnectTimeout);
} else if self.ka_timer.as_mut().unwrap().deadline() >= self.ka_expire {
// check for any outstanding tasks
if self.state.is_empty() && self.write_buf.is_empty() {
if self.flags.contains(Flags::STARTED) {
trace!("Keep-alive timeout, close connection");
self.flags.insert(Flags::SHUTDOWN);
// start shutdown timer
if let Some(deadline) =
self.codec.config.client_disconnect_timer()
{
if let Some(timer) = self.ka_timer.as_mut() {
timer.reset(deadline);
let _ = timer.poll();
}
} else {
// no shutdown timeout, drop socket
self.flags.insert(Flags::WRITE_DISCONNECT);
return Ok(());
}
} else {
// timeout on first request (slow request) return 408
if !self.flags.contains(Flags::STARTED) {
trace!("Slow request timeout");
let _ = self.send_response(
Response::RequestTimeout().finish().drop_body(),
ResponseBody::Other(Body::Empty),
);
} else {
trace!("Keep-alive connection timeout");
}
self.flags.insert(Flags::STARTED | Flags::SHUTDOWN);
self.state = State::None;
}
} else if let Some(deadline) = self.codec.config.keep_alive_expire()
{
if let Some(timer) = self.ka_timer.as_mut() {
timer.reset(deadline);
let _ = timer.poll();
}
}
} else if let Some(timer) = self.ka_timer.as_mut() {
timer.reset(self.ka_expire);
let _ = timer.poll();
}
}
Async::NotReady => (),
}
Ok(())
}
}
impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
type Item = ();
type Error = DispatchError;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.inner {
DispatcherState::Normal(ref mut inner) => {
inner.poll_keepalive()?;
if inner.flags.contains(Flags::SHUTDOWN) {
if inner.flags.contains(Flags::WRITE_DISCONNECT) {
Ok(Async::Ready(()))
} else {
// flush buffer
inner.poll_flush()?;
if !inner.write_buf.is_empty() {
Ok(Async::NotReady)
} else {
match inner.io.shutdown()? {
Async::Ready(_) => Ok(Async::Ready(())),
Async::NotReady => Ok(Async::NotReady),
}
}
}
} else {
// read socket into a buf
if !inner.flags.contains(Flags::READ_DISCONNECT) {
if let Some(true) =
read_available(&mut inner.io, &mut inner.read_buf)?
{
inner.flags.insert(Flags::READ_DISCONNECT)
}
}
inner.poll_request()?;
loop {
if inner.write_buf.remaining_mut() < LW_BUFFER_SIZE {
inner.write_buf.reserve(HW_BUFFER_SIZE);
}
let result = inner.poll_response()?;
let drain = result == PollResponse::DrainWriteBuf;
// switch to upgrade handler
if let PollResponse::Upgrade(req) = result {
if let DispatcherState::Normal(inner) =
std::mem::replace(&mut self.inner, DispatcherState::None)
{
let mut parts = FramedParts::with_read_buf(
inner.io,
inner.codec,
inner.read_buf,
);
parts.write_buf = inner.write_buf;
let framed = Framed::from_parts(parts);
self.inner = DispatcherState::Upgrade(
inner.upgrade.unwrap().call((req, framed)),
);
return self.poll();
} else {
panic!()
}
}
// we did not get WouldBlock from the write operation,
// so the data was written to the kernel completely (OSX)
// and we have to write again, otherwise the response can get stuck
if inner.poll_flush()? || !drain {
break;
}
}
// client is gone
if inner.flags.contains(Flags::WRITE_DISCONNECT) {
return Ok(Async::Ready(()));
}
let is_empty = inner.state.is_empty();
// read half is closed and we are not processing any responses
if inner.flags.contains(Flags::READ_DISCONNECT) && is_empty {
inner.flags.insert(Flags::SHUTDOWN);
}
// keep-alive and stream errors
if is_empty && inner.write_buf.is_empty() {
if let Some(err) = inner.error.take() {
Err(err)
}
// disconnect if keep-alive is not enabled
else if inner.flags.contains(Flags::STARTED)
&& !inner.flags.intersects(Flags::KEEPALIVE)
{
inner.flags.insert(Flags::SHUTDOWN);
self.poll()
}
// disconnect if shutdown
else if inner.flags.contains(Flags::SHUTDOWN) {
self.poll()
} else {
Ok(Async::NotReady)
}
} else {
Ok(Async::NotReady)
}
}
}
DispatcherState::Upgrade(ref mut fut) => fut.poll().map_err(|e| {
error!("Upgrade handler error: {}", e);
DispatchError::Upgrade
}),
DispatcherState::None => panic!(),
}
}
}
fn read_available<T>(io: &mut T, buf: &mut BytesMut) -> Result<Option<bool>, io::Error>
where
T: io::Read,
{
let mut read_some = false;
loop {
if buf.remaining_mut() < LW_BUFFER_SIZE {
buf.reserve(HW_BUFFER_SIZE);
}
let read = unsafe { io.read(buf.bytes_mut()) };
match read {
Ok(n) => {
if n == 0 {
return Ok(Some(true));
} else {
read_some = true;
unsafe {
buf.advance_mut(n);
}
}
}
Err(e) => {
return if e.kind() == io::ErrorKind::WouldBlock {
if read_some {
Ok(Some(false))
} else {
Ok(None)
}
} else if e.kind() == io::ErrorKind::ConnectionReset && read_some {
Ok(Some(true))
} else {
Err(e)
};
}
}
}
}
#[cfg(test)]
mod tests {
use actix_service::IntoService;
use futures::future::{lazy, ok};
use super::*;
use crate::error::Error;
use crate::h1::{ExpectHandler, UpgradeHandler};
use crate::test::TestBuffer;
#[test]
fn test_req_parse_err() {
let mut sys = actix_rt::System::new("test");
let _ = sys.block_on(lazy(|| {
let buf = TestBuffer::new("GET /test HTTP/1\r\n\r\n");
let mut h1 = Dispatcher::<_, _, _, _, UpgradeHandler<TestBuffer>>::new(
buf,
ServiceConfig::default(),
CloneableService::new(
(|_| ok::<_, Error>(Response::Ok().finish())).into_service(),
),
CloneableService::new(ExpectHandler),
None,
);
assert!(h1.poll().is_err());
if let DispatcherState::Normal(ref inner) = h1.inner {
assert!(inner.flags.contains(Flags::READ_DISCONNECT));
assert_eq!(&inner.io.write_buf[..26], b"HTTP/1.1 400 Bad Request\r\n");
}
ok::<_, ()>(())
}));
}
}

actix-http/src/h1/encoder.rs (new file)
@@ -0,0 +1,439 @@
#![allow(unused_imports, unused_variables, dead_code)]
use std::fmt::Write as FmtWrite;
use std::io::Write;
use std::marker::PhantomData;
use std::str::FromStr;
use std::{cmp, fmt, io, mem};
use bytes::{BufMut, Bytes, BytesMut};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::header::{map, ContentEncoding};
use crate::helpers;
use crate::http::header::{
HeaderValue, ACCEPT_ENCODING, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
};
use crate::http::{HeaderMap, Method, StatusCode, Version};
use crate::message::{ConnectionType, Head, RequestHead, ResponseHead};
use crate::request::Request;
use crate::response::Response;
const AVERAGE_HEADER_SIZE: usize = 30;
#[derive(Debug)]
pub(crate) struct MessageEncoder<T: MessageType> {
pub length: BodySize,
pub te: TransferEncoding,
_t: PhantomData<T>,
}
impl<T: MessageType> Default for MessageEncoder<T> {
fn default() -> Self {
MessageEncoder {
length: BodySize::None,
te: TransferEncoding::empty(),
_t: PhantomData,
}
}
}
pub(crate) trait MessageType: Sized {
fn status(&self) -> Option<StatusCode>;
fn headers(&self) -> &HeaderMap;
fn chunked(&self) -> bool;
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()>;
fn encode_headers(
&mut self,
dst: &mut BytesMut,
version: Version,
mut length: BodySize,
ctype: ConnectionType,
config: &ServiceConfig,
) -> io::Result<()> {
let chunked = self.chunked();
let mut skip_len = length != BodySize::Stream;
// Content length
if let Some(status) = self.status() {
match status {
StatusCode::NO_CONTENT
| StatusCode::CONTINUE
| StatusCode::PROCESSING => length = BodySize::None,
StatusCode::SWITCHING_PROTOCOLS => {
skip_len = true;
length = BodySize::Stream;
}
_ => (),
}
}
match length {
BodySize::Stream => {
if chunked {
dst.put_slice(b"\r\ntransfer-encoding: chunked\r\n")
} else {
skip_len = false;
dst.put_slice(b"\r\n");
}
}
BodySize::Empty => {
dst.put_slice(b"\r\ncontent-length: 0\r\n");
}
BodySize::Sized(len) => helpers::write_content_length(len, dst),
BodySize::Sized64(len) => {
dst.put_slice(b"\r\ncontent-length: ");
write!(dst.writer(), "{}\r\n", len)?;
}
BodySize::None => dst.put_slice(b"\r\n"),
}
// Connection
match ctype {
ConnectionType::Upgrade => dst.put_slice(b"connection: upgrade\r\n"),
ConnectionType::KeepAlive if version < Version::HTTP_11 => {
dst.put_slice(b"connection: keep-alive\r\n")
}
ConnectionType::Close if version >= Version::HTTP_11 => {
dst.put_slice(b"connection: close\r\n")
}
_ => (),
}
// write headers
let mut pos = 0;
let mut has_date = false;
let mut remaining = dst.remaining_mut();
let mut buf = unsafe { &mut *(dst.bytes_mut() as *mut [u8]) };
for (key, value) in self.headers().inner.iter() {
match *key {
CONNECTION => continue,
TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => continue,
DATE => {
has_date = true;
}
_ => (),
}
let k = key.as_str().as_bytes();
match value {
map::Value::One(ref val) => {
let v = val.as_ref();
let len = k.len() + v.len() + 4;
if len > remaining {
unsafe {
dst.advance_mut(pos);
}
pos = 0;
dst.reserve(len * 2);
remaining = dst.remaining_mut();
unsafe {
buf = &mut *(dst.bytes_mut() as *mut _);
}
}
buf[pos..pos + k.len()].copy_from_slice(k);
pos += k.len();
buf[pos..pos + 2].copy_from_slice(b": ");
pos += 2;
buf[pos..pos + v.len()].copy_from_slice(v);
pos += v.len();
buf[pos..pos + 2].copy_from_slice(b"\r\n");
pos += 2;
remaining -= len;
}
map::Value::Multi(ref vec) => {
for val in vec {
let v = val.as_ref();
let len = k.len() + v.len() + 4;
if len > remaining {
unsafe {
dst.advance_mut(pos);
}
pos = 0;
dst.reserve(len * 2);
remaining = dst.remaining_mut();
unsafe {
buf = &mut *(dst.bytes_mut() as *mut _);
}
}
buf[pos..pos + k.len()].copy_from_slice(k);
pos += k.len();
buf[pos..pos + 2].copy_from_slice(b": ");
pos += 2;
buf[pos..pos + v.len()].copy_from_slice(v);
pos += v.len();
buf[pos..pos + 2].copy_from_slice(b"\r\n");
pos += 2;
remaining -= len;
}
}
}
}
unsafe {
dst.advance_mut(pos);
}
// optimized date header, set_date writes \r\n
if !has_date {
config.set_date(dst);
} else {
// msg eof
dst.extend_from_slice(b"\r\n");
}
Ok(())
}
}
impl MessageType for Response<()> {
fn status(&self) -> Option<StatusCode> {
Some(self.head().status)
}
fn chunked(&self) -> bool {
self.head().chunked()
}
fn headers(&self) -> &HeaderMap {
&self.head().headers
}
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> {
let head = self.head();
let reason = head.reason().as_bytes();
dst.reserve(256 + head.headers.len() * AVERAGE_HEADER_SIZE + reason.len());
// status line
helpers::write_status_line(head.version, head.status.as_u16(), dst);
dst.put_slice(reason);
Ok(())
}
}
impl MessageType for RequestHead {
fn status(&self) -> Option<StatusCode> {
None
}
fn chunked(&self) -> bool {
self.chunked()
}
fn headers(&self) -> &HeaderMap {
&self.headers
}
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> {
dst.reserve(256 + self.headers.len() * AVERAGE_HEADER_SIZE);
write!(
Writer(dst),
"{} {} {}",
self.method,
self.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"),
match self.version {
Version::HTTP_09 => "HTTP/0.9",
Version::HTTP_10 => "HTTP/1.0",
Version::HTTP_11 => "HTTP/1.1",
Version::HTTP_2 => "HTTP/2.0",
}
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
}
}
impl<T: MessageType> MessageEncoder<T> {
/// Encode message
pub fn encode_chunk(&mut self, msg: &[u8], buf: &mut BytesMut) -> io::Result<bool> {
self.te.encode(msg, buf)
}
/// Encode eof
pub fn encode_eof(&mut self, buf: &mut BytesMut) -> io::Result<()> {
self.te.encode_eof(buf)
}
pub fn encode(
&mut self,
dst: &mut BytesMut,
message: &mut T,
head: bool,
stream: bool,
version: Version,
length: BodySize,
ctype: ConnectionType,
config: &ServiceConfig,
) -> io::Result<()> {
// transfer encoding
if !head {
self.te = match length {
BodySize::Empty => TransferEncoding::empty(),
BodySize::Sized(len) => TransferEncoding::length(len as u64),
BodySize::Sized64(len) => TransferEncoding::length(len),
BodySize::Stream => {
if message.chunked() && !stream {
TransferEncoding::chunked()
} else {
TransferEncoding::eof()
}
}
BodySize::None => TransferEncoding::empty(),
};
} else {
self.te = TransferEncoding::empty();
}
message.encode_status(dst)?;
message.encode_headers(dst, version, length, ctype, config)
}
}
/// Encoders to handle different Transfer-Encodings.
#[derive(Debug)]
pub(crate) struct TransferEncoding {
kind: TransferEncodingKind,
}
#[derive(Debug, PartialEq, Clone)]
enum TransferEncodingKind {
/// An Encoder for when Transfer-Encoding includes `chunked`.
Chunked(bool),
/// An Encoder for when Content-Length is set.
///
/// Enforces that the body is not longer than the Content-Length header.
Length(u64),
/// An Encoder for when Content-Length is not known.
///
/// Application decides when to stop writing.
Eof,
}
impl TransferEncoding {
#[inline]
pub fn empty() -> TransferEncoding {
TransferEncoding {
kind: TransferEncodingKind::Length(0),
}
}
#[inline]
pub fn eof() -> TransferEncoding {
TransferEncoding {
kind: TransferEncodingKind::Eof,
}
}
#[inline]
pub fn chunked() -> TransferEncoding {
TransferEncoding {
kind: TransferEncodingKind::Chunked(false),
}
}
#[inline]
pub fn length(len: u64) -> TransferEncoding {
TransferEncoding {
kind: TransferEncodingKind::Length(len),
}
}
/// Encode message. Return `EOF` state of encoder
#[inline]
pub fn encode(&mut self, msg: &[u8], buf: &mut BytesMut) -> io::Result<bool> {
match self.kind {
TransferEncodingKind::Eof => {
let eof = msg.is_empty();
buf.extend_from_slice(msg);
Ok(eof)
}
TransferEncodingKind::Chunked(ref mut eof) => {
if *eof {
return Ok(true);
}
if msg.is_empty() {
*eof = true;
buf.extend_from_slice(b"0\r\n\r\n");
} else {
writeln!(Writer(buf), "{:X}\r", msg.len())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
buf.reserve(msg.len() + 2);
buf.extend_from_slice(msg);
buf.extend_from_slice(b"\r\n");
}
Ok(*eof)
}
TransferEncodingKind::Length(ref mut remaining) => {
if *remaining > 0 {
if msg.is_empty() {
return Ok(*remaining == 0);
}
let len = cmp::min(*remaining, msg.len() as u64);
buf.extend_from_slice(&msg[..len as usize]);
*remaining -= len as u64;
Ok(*remaining == 0)
} else {
Ok(true)
}
}
}
}
/// Encode eof. Return `EOF` state of encoder
#[inline]
pub fn encode_eof(&mut self, buf: &mut BytesMut) -> io::Result<()> {
match self.kind {
TransferEncodingKind::Eof => Ok(()),
TransferEncodingKind::Length(rem) => {
if rem != 0 {
Err(io::Error::new(io::ErrorKind::UnexpectedEof, ""))
} else {
Ok(())
}
}
TransferEncodingKind::Chunked(ref mut eof) => {
if !*eof {
*eof = true;
buf.extend_from_slice(b"0\r\n\r\n");
}
Ok(())
}
}
}
}
struct Writer<'a>(pub &'a mut BytesMut);
impl<'a> io::Write for Writer<'a> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use bytes::Bytes;
#[test]
fn test_chunked_te() {
let mut bytes = BytesMut::new();
let mut enc = TransferEncoding::chunked();
{
assert!(!enc.encode(b"test", &mut bytes).ok().unwrap());
assert!(enc.encode(b"", &mut bytes).ok().unwrap());
}
assert_eq!(
bytes.take().freeze(),
Bytes::from_static(b"4\r\ntest\r\n0\r\n\r\n")
);
}
}
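For comparison, a sketch of the `Length` variant in the same style as `test_chunked_te` above (not part of this commit): it copies at most the declared number of bytes and reports EOF once the content-length is exhausted.

#[cfg(test)]
#[test]
fn test_length_te_sketch() {
    let mut bytes = BytesMut::new();
    let mut enc = TransferEncoding::length(4);
    // first chunk does not yet reach the declared content-length
    assert!(!enc.encode(b"te", &mut bytes).ok().unwrap());
    // second chunk completes the declared 4 bytes, so the encoder reports eof
    assert!(enc.encode(b"st", &mut bytes).ok().unwrap());
    assert_eq!(bytes.take().freeze(), Bytes::from_static(b"test"));
}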

actix-http/src/h1/expect.rs (new file)
@@ -0,0 +1,36 @@
use actix_service::{NewService, Service};
use futures::future::{ok, FutureResult};
use futures::{Async, Poll};
use crate::error::Error;
use crate::request::Request;
pub struct ExpectHandler;
impl NewService for ExpectHandler {
type Request = Request;
type Response = Request;
type Error = Error;
type Service = ExpectHandler;
type InitError = Error;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, _: &()) -> Self::Future {
ok(ExpectHandler)
}
}
impl Service for ExpectHandler {
type Request = Request;
type Response = Request;
type Error = Error;
type Future = FutureResult<Self::Response, Self::Error>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: Request) -> Self::Future {
ok(req)
}
}
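A hypothetical alternative expect service is sketched below (not in this diff; the type name is made up) to illustrate the contract: the dispatcher calls the expect service before the request body is read, so resolving to the request means "send `100 Continue` and proceed", while resolving to an error makes the dispatcher reply with an error response instead. To plug such a service in via `H1Service::expect` it would also need a `NewService` implementation like the one above.

// Sketch only: same shape as ExpectHandler, but a real implementation could
// inspect req.head() and fail before the client uploads the body.
pub struct InspectExpect;

impl Service for InspectExpect {
    type Request = Request;
    type Response = Request;
    type Error = Error;
    type Future = FutureResult<Self::Response, Self::Error>;

    fn poll_ready(&mut self) -> Poll<(), Self::Error> {
        Ok(Async::Ready(()))
    }

    fn call(&mut self, req: Request) -> Self::Future {
        // accept everything here; e.g. a content-length or authorization
        // check could return an error future instead
        ok(req)
    }
}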

actix-http/src/h1/mod.rs (new file)
@@ -0,0 +1,85 @@
//! HTTP/1 implementation
use bytes::{Bytes, BytesMut};
mod client;
mod codec;
mod decoder;
mod dispatcher;
mod encoder;
mod expect;
mod payload;
mod service;
mod upgrade;
mod utils;
pub use self::client::{ClientCodec, ClientPayloadCodec};
pub use self::codec::Codec;
pub use self::dispatcher::Dispatcher;
pub use self::expect::ExpectHandler;
pub use self::payload::Payload;
pub use self::service::{H1Service, H1ServiceHandler, OneRequest};
pub use self::upgrade::UpgradeHandler;
pub use self::utils::SendResponse;
#[derive(Debug)]
/// Codec message
pub enum Message<T> {
/// Http message
Item(T),
/// Payload chunk
Chunk(Option<Bytes>),
}
impl<T> From<T> for Message<T> {
fn from(item: T) -> Self {
Message::Item(item)
}
}
/// Incoming request type
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MessageType {
None,
Payload,
Stream,
}
const LW: usize = 2 * 1024;
const HW: usize = 32 * 1024;
pub(crate) fn reserve_readbuf(src: &mut BytesMut) {
let cap = src.capacity();
if cap < LW {
src.reserve(HW - cap);
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::request::Request;
impl Message<Request> {
pub fn message(self) -> Request {
match self {
Message::Item(req) => req,
_ => panic!("error"),
}
}
pub fn chunk(self) -> Bytes {
match self {
Message::Chunk(Some(data)) => data,
_ => panic!("error"),
}
}
pub fn eof(self) -> bool {
match self {
Message::Chunk(None) => true,
Message::Chunk(Some(_)) => false,
_ => panic!("error"),
}
}
}
}

actix-http/src/h1/payload.rs (new file)
@@ -0,0 +1,254 @@
//! Payload stream
use std::cell::RefCell;
use std::collections::VecDeque;
use std::rc::{Rc, Weak};
use bytes::Bytes;
use futures::task::current as current_task;
use futures::task::Task;
use futures::{Async, Poll, Stream};
use crate::error::PayloadError;
/// max buffer size 32k
pub(crate) const MAX_BUFFER_SIZE: usize = 32_768;
#[derive(Debug, PartialEq)]
pub enum PayloadStatus {
Read,
Pause,
Dropped,
}
/// Buffered stream of bytes chunks
///
/// Payload stores chunks in a vector. First chunk can be received with
/// `.readany()` method. Payload stream is not thread safe. Payload does not
/// notify current task when new data is available.
///
/// Payload stream can be used as `Response` body stream.
#[derive(Debug)]
pub struct Payload {
inner: Rc<RefCell<Inner>>,
}
impl Payload {
/// Create payload stream.
///
/// This method constructs two objects responsible for bytes stream
/// generation.
///
/// * `PayloadSender` - *Sender* side of the stream
///
/// * `Payload` - *Receiver* side of the stream
pub fn create(eof: bool) -> (PayloadSender, Payload) {
let shared = Rc::new(RefCell::new(Inner::new(eof)));
(
PayloadSender {
inner: Rc::downgrade(&shared),
},
Payload { inner: shared },
)
}
/// Create empty payload
#[doc(hidden)]
pub fn empty() -> Payload {
Payload {
inner: Rc::new(RefCell::new(Inner::new(true))),
}
}
/// Length of the data in this payload
#[cfg(test)]
pub fn len(&self) -> usize {
self.inner.borrow().len()
}
/// Is payload empty
#[cfg(test)]
pub fn is_empty(&self) -> bool {
self.inner.borrow().len() == 0
}
/// Put unused data back to payload
#[inline]
pub fn unread_data(&mut self, data: Bytes) {
self.inner.borrow_mut().unread_data(data);
}
}
impl Stream for Payload {
type Item = Bytes;
type Error = PayloadError;
#[inline]
fn poll(&mut self) -> Poll<Option<Bytes>, PayloadError> {
self.inner.borrow_mut().readany()
}
}
/// Sender part of the payload stream
pub struct PayloadSender {
inner: Weak<RefCell<Inner>>,
}
impl PayloadSender {
#[inline]
pub fn set_error(&mut self, err: PayloadError) {
if let Some(shared) = self.inner.upgrade() {
shared.borrow_mut().set_error(err)
}
}
#[inline]
pub fn feed_eof(&mut self) {
if let Some(shared) = self.inner.upgrade() {
shared.borrow_mut().feed_eof()
}
}
#[inline]
pub fn feed_data(&mut self, data: Bytes) {
if let Some(shared) = self.inner.upgrade() {
shared.borrow_mut().feed_data(data)
}
}
#[inline]
pub fn need_read(&self) -> PayloadStatus {
// we check need_read only if Payload (the other side) is still alive,
// otherwise always return Dropped (the caller may simply consume the payload)
if let Some(shared) = self.inner.upgrade() {
if shared.borrow().need_read {
PayloadStatus::Read
} else {
#[cfg(not(test))]
{
if shared.borrow_mut().io_task.is_none() {
shared.borrow_mut().io_task = Some(current_task());
}
}
PayloadStatus::Pause
}
} else {
PayloadStatus::Dropped
}
}
}
#[derive(Debug)]
struct Inner {
len: usize,
eof: bool,
err: Option<PayloadError>,
need_read: bool,
items: VecDeque<Bytes>,
task: Option<Task>,
io_task: Option<Task>,
}
impl Inner {
fn new(eof: bool) -> Self {
Inner {
eof,
len: 0,
err: None,
items: VecDeque::new(),
need_read: true,
task: None,
io_task: None,
}
}
#[inline]
fn set_error(&mut self, err: PayloadError) {
self.err = Some(err);
}
#[inline]
fn feed_eof(&mut self) {
self.eof = true;
}
#[inline]
fn feed_data(&mut self, data: Bytes) {
self.len += data.len();
self.items.push_back(data);
self.need_read = self.len < MAX_BUFFER_SIZE;
if let Some(task) = self.task.take() {
task.notify()
}
}
#[cfg(test)]
fn len(&self) -> usize {
self.len
}
fn readany(&mut self) -> Poll<Option<Bytes>, PayloadError> {
if let Some(data) = self.items.pop_front() {
self.len -= data.len();
self.need_read = self.len < MAX_BUFFER_SIZE;
if self.need_read && self.task.is_none() && !self.eof {
self.task = Some(current_task());
}
if let Some(task) = self.io_task.take() {
task.notify()
}
Ok(Async::Ready(Some(data)))
} else if let Some(err) = self.err.take() {
Err(err)
} else if self.eof {
Ok(Async::Ready(None))
} else {
self.need_read = true;
#[cfg(not(test))]
{
if self.task.is_none() {
self.task = Some(current_task());
}
if let Some(task) = self.io_task.take() {
task.notify()
}
}
Ok(Async::NotReady)
}
}
fn unread_data(&mut self, data: Bytes) {
self.len += data.len();
self.items.push_front(data);
}
}
#[cfg(test)]
mod tests {
use super::*;
use actix_rt::Runtime;
use futures::future::{lazy, result};
#[test]
fn test_unread_data() {
Runtime::new()
.unwrap()
.block_on(lazy(|| {
let (_, mut payload) = Payload::create(false);
payload.unread_data(Bytes::from("data"));
assert!(!payload.is_empty());
assert_eq!(payload.len(), 4);
assert_eq!(
Async::Ready(Some(Bytes::from("data"))),
payload.poll().ok().unwrap()
);
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
}
}
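A small end-to-end sketch of the sender/receiver pair, written in the same runtime-driven style as `test_unread_data` above (not part of this commit): data fed through `PayloadSender` becomes available on the `Payload` stream, and `feed_eof` terminates it.

#[cfg(test)]
#[test]
fn test_feed_data_sketch() {
    use actix_rt::Runtime;
    use futures::future::{lazy, result};

    Runtime::new()
        .unwrap()
        .block_on(lazy(|| {
            let (mut sender, mut payload) = Payload::create(false);
            sender.feed_data(Bytes::from("chunk"));
            sender.feed_eof();
            assert_eq!(
                Async::Ready(Some(Bytes::from("chunk"))),
                payload.poll().ok().unwrap()
            );
            // after eof the stream reports completion
            assert_eq!(Async::Ready(None), payload.poll().ok().unwrap());
            let res: Result<(), ()> = Ok(());
            result(res)
        }))
        .unwrap();
}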

actix-http/src/h1/service.rs (new file)
@@ -0,0 +1,399 @@
use std::fmt;
use std::marker::PhantomData;
use actix_codec::Framed;
use actix_server_config::{Io, IoStream, ServerConfig as SrvConfig};
use actix_service::{IntoNewService, NewService, Service};
use actix_utils::cloneable::CloneableService;
use futures::future::{ok, FutureResult};
use futures::{try_ready, Async, Future, IntoFuture, Poll, Stream};
use crate::body::MessageBody;
use crate::config::{KeepAlive, ServiceConfig};
use crate::error::{DispatchError, Error, ParseError};
use crate::request::Request;
use crate::response::Response;
use super::codec::Codec;
use super::dispatcher::Dispatcher;
use super::{ExpectHandler, Message, UpgradeHandler};
/// `NewService` implementation for HTTP1 transport
pub struct H1Service<T, P, S, B, X = ExpectHandler, U = UpgradeHandler<T>> {
srv: S,
cfg: ServiceConfig,
expect: X,
upgrade: Option<U>,
_t: PhantomData<(T, P, B)>,
}
impl<T, P, S, B> H1Service<T, P, S, B>
where
S: NewService<SrvConfig, Request = Request>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
{
/// Create new `H1Service` instance with default config.
pub fn new<F: IntoNewService<S, SrvConfig>>(service: F) -> Self {
let cfg = ServiceConfig::new(KeepAlive::Timeout(5), 5000, 0);
H1Service {
cfg,
srv: service.into_new_service(),
expect: ExpectHandler,
upgrade: None,
_t: PhantomData,
}
}
/// Create new `H1Service` instance with config.
pub fn with_config<F: IntoNewService<S, SrvConfig>>(
cfg: ServiceConfig,
service: F,
) -> Self {
H1Service {
cfg,
srv: service.into_new_service(),
expect: ExpectHandler,
upgrade: None,
_t: PhantomData,
}
}
}
impl<T, P, S, B, X, U> H1Service<T, P, S, B, X, U>
where
S: NewService<SrvConfig, Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
{
pub fn expect<X1>(self, expect: X1) -> H1Service<T, P, S, B, X1, U>
where
X1: NewService<Request = Request, Response = Request>,
X1::Error: Into<Error>,
X1::InitError: fmt::Debug,
{
H1Service {
expect,
cfg: self.cfg,
srv: self.srv,
upgrade: self.upgrade,
_t: PhantomData,
}
}
pub fn upgrade<U1>(self, upgrade: Option<U1>) -> H1Service<T, P, S, B, X, U1>
where
U1: NewService<Request = (Request, Framed<T, Codec>), Response = ()>,
U1::Error: fmt::Display,
U1::InitError: fmt::Debug,
{
H1Service {
upgrade,
cfg: self.cfg,
srv: self.srv,
expect: self.expect,
_t: PhantomData,
}
}
}
impl<T, P, S, B, X, U> NewService<SrvConfig> for H1Service<T, P, S, B, X, U>
where
T: IoStream,
S: NewService<SrvConfig, Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
X: NewService<Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: NewService<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
type Request = Io<T, P>;
type Response = ();
type Error = DispatchError;
type InitError = ();
type Service = H1ServiceHandler<T, P, S::Service, B, X::Service, U::Service>;
type Future = H1ServiceResponse<T, P, S, B, X, U>;
fn new_service(&self, cfg: &SrvConfig) -> Self::Future {
H1ServiceResponse {
fut: self.srv.new_service(cfg).into_future(),
fut_ex: Some(self.expect.new_service(&())),
fut_upg: self.upgrade.as_ref().map(|f| f.new_service(&())),
expect: None,
upgrade: None,
cfg: Some(self.cfg.clone()),
_t: PhantomData,
}
}
}
#[doc(hidden)]
pub struct H1ServiceResponse<T, P, S, B, X, U>
where
S: NewService<SrvConfig, Request = Request>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
X: NewService<Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: NewService<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
fut: S::Future,
fut_ex: Option<X::Future>,
fut_upg: Option<U::Future>,
expect: Option<X::Service>,
upgrade: Option<U::Service>,
cfg: Option<ServiceConfig>,
_t: PhantomData<(T, P, B)>,
}
impl<T, P, S, B, X, U> Future for H1ServiceResponse<T, P, S, B, X, U>
where
T: IoStream,
S: NewService<SrvConfig, Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
X: NewService<Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: NewService<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
type Item = H1ServiceHandler<T, P, S::Service, B, X::Service, U::Service>;
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(ref mut fut) = self.fut_ex {
let expect = try_ready!(fut
.poll()
.map_err(|e| log::error!("Init http service error: {:?}", e)));
self.expect = Some(expect);
self.fut_ex.take();
}
if let Some(ref mut fut) = self.fut_upg {
let upgrade = try_ready!(fut
.poll()
.map_err(|e| log::error!("Init http service error: {:?}", e)));
self.upgrade = Some(upgrade);
self.fut_ex.take();
}
let service = try_ready!(self
.fut
.poll()
.map_err(|e| log::error!("Init http service error: {:?}", e)));
Ok(Async::Ready(H1ServiceHandler::new(
self.cfg.take().unwrap(),
service,
self.expect.take().unwrap(),
self.upgrade.take(),
)))
}
}
/// `Service` implementation for HTTP1 transport
pub struct H1ServiceHandler<T, P, S, B, X, U> {
srv: CloneableService<S>,
expect: CloneableService<X>,
upgrade: Option<CloneableService<U>>,
cfg: ServiceConfig,
_t: PhantomData<(T, P, B)>,
}
impl<T, P, S, B, X, U> H1ServiceHandler<T, P, S, B, X, U>
where
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
fn new(
cfg: ServiceConfig,
srv: S,
expect: X,
upgrade: Option<U>,
) -> H1ServiceHandler<T, P, S, B, X, U> {
H1ServiceHandler {
srv: CloneableService::new(srv),
expect: CloneableService::new(expect),
upgrade: upgrade.map(|s| CloneableService::new(s)),
cfg,
_t: PhantomData,
}
}
}
impl<T, P, S, B, X, U> Service for H1ServiceHandler<T, P, S, B, X, U>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
type Request = Io<T, P>;
type Response = ();
type Error = DispatchError;
type Future = Dispatcher<T, S, B, X, U>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
let ready = self
.expect
.poll_ready()
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready();
let ready = self
.srv
.poll_ready()
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready()
&& ready;
if ready {
Ok(Async::Ready(()))
} else {
Ok(Async::NotReady)
}
}
fn call(&mut self, req: Self::Request) -> Self::Future {
Dispatcher::new(
req.into_parts().0,
self.cfg.clone(),
self.srv.clone(),
self.expect.clone(),
self.upgrade.clone(),
)
}
}
/// `NewService` implementation for `OneRequestService` service
#[derive(Default)]
pub struct OneRequest<T, P> {
config: ServiceConfig,
_t: PhantomData<(T, P)>,
}
impl<T, P> OneRequest<T, P>
where
T: IoStream,
{
/// Create new `OneRequest` instance.
pub fn new() -> Self {
OneRequest {
config: ServiceConfig::default(),
_t: PhantomData,
}
}
}
impl<T, P> NewService<SrvConfig> for OneRequest<T, P>
where
T: IoStream,
{
type Request = Io<T, P>;
type Response = (Request, Framed<T, Codec>);
type Error = ParseError;
type InitError = ();
type Service = OneRequestService<T, P>;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, _: &SrvConfig) -> Self::Future {
ok(OneRequestService {
config: self.config.clone(),
_t: PhantomData,
})
}
}
/// `Service` implementation for HTTP1 transport. Reads one request and returns
/// the request together with the framed object.
pub struct OneRequestService<T, P> {
config: ServiceConfig,
_t: PhantomData<(T, P)>,
}
impl<T, P> Service for OneRequestService<T, P>
where
T: IoStream,
{
type Request = Io<T, P>;
type Response = (Request, Framed<T, Codec>);
type Error = ParseError;
type Future = OneRequestServiceResponse<T>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: Self::Request) -> Self::Future {
OneRequestServiceResponse {
framed: Some(Framed::new(
req.into_parts().0,
Codec::new(self.config.clone()),
)),
}
}
}
#[doc(hidden)]
pub struct OneRequestServiceResponse<T>
where
T: IoStream,
{
framed: Option<Framed<T, Codec>>,
}
impl<T> Future for OneRequestServiceResponse<T>
where
T: IoStream,
{
type Item = (Request, Framed<T, Codec>);
type Error = ParseError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.framed.as_mut().unwrap().poll()? {
Async::Ready(Some(req)) => match req {
Message::Item(req) => {
Ok(Async::Ready((req, self.framed.take().unwrap())))
}
Message::Chunk(_) => unreachable!("Something is wrong"),
},
Async::Ready(None) => Err(ParseError::Incomplete),
Async::NotReady => Ok(Async::NotReady),
}
}
}

actix-http/src/h1/upgrade.rs (new file)
@@ -0,0 +1,40 @@
use std::marker::PhantomData;
use actix_codec::Framed;
use actix_service::{NewService, Service};
use futures::future::FutureResult;
use futures::{Async, Poll};
use crate::error::Error;
use crate::h1::Codec;
use crate::request::Request;
pub struct UpgradeHandler<T>(PhantomData<T>);
impl<T> NewService for UpgradeHandler<T> {
type Request = (Request, Framed<T, Codec>);
type Response = ();
type Error = Error;
type Service = UpgradeHandler<T>;
type InitError = Error;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, _: &()) -> Self::Future {
unimplemented!()
}
}
impl<T> Service for UpgradeHandler<T> {
type Request = (Request, Framed<T, Codec>);
type Response = ();
type Error = Error;
type Future = FutureResult<Self::Response, Self::Error>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, _: Self::Request) -> Self::Future {
unimplemented!()
}
}

actix-http/src/h1/utils.rs (new file)
@@ -0,0 +1,92 @@
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use futures::{Async, Future, Poll, Sink};
use crate::body::{BodySize, MessageBody, ResponseBody};
use crate::error::Error;
use crate::h1::{Codec, Message};
use crate::response::Response;
/// Send http/1 response
pub struct SendResponse<T, B> {
res: Option<Message<(Response<()>, BodySize)>>,
body: Option<ResponseBody<B>>,
framed: Option<Framed<T, Codec>>,
}
impl<T, B> SendResponse<T, B>
where
B: MessageBody,
{
pub fn new(framed: Framed<T, Codec>, response: Response<B>) -> Self {
let (res, body) = response.into_parts();
SendResponse {
res: Some((res, body.size()).into()),
body: Some(body),
framed: Some(framed),
}
}
}
impl<T, B> Future for SendResponse<T, B>
where
T: AsyncRead + AsyncWrite,
B: MessageBody,
{
type Item = Framed<T, Codec>;
type Error = Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
let mut body_ready = self.body.is_some();
let framed = self.framed.as_mut().unwrap();
// send body
if self.res.is_none() && self.body.is_some() {
while body_ready && self.body.is_some() && !framed.is_write_buf_full() {
match self.body.as_mut().unwrap().poll_next()? {
Async::Ready(item) => {
// body is done
if item.is_none() {
let _ = self.body.take();
}
framed.force_send(Message::Chunk(item))?;
}
Async::NotReady => body_ready = false,
}
}
}
// flush write buffer
if !framed.is_write_buf_empty() {
match framed.poll_complete()? {
Async::Ready(_) => {
if body_ready {
continue;
} else {
return Ok(Async::NotReady);
}
}
Async::NotReady => return Ok(Async::NotReady),
}
}
// send response
if let Some(res) = self.res.take() {
framed.force_send(res)?;
continue;
}
if self.body.is_some() {
if body_ready {
continue;
} else {
return Ok(Async::NotReady);
}
} else {
break;
}
}
Ok(Async::Ready(self.framed.take().unwrap()))
}
}
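A hedged usage sketch (not in this diff; the function name is made up and `crate::body::Body` is assumed as the body type): given the `Framed` transport that, for example, `OneRequestService` hands back together with the request, `SendResponse` is a future that writes the response head, streams the body, and resolves back to the framed object so the connection can keep being used.

use crate::body::Body;

// Sketch only: build the future; the caller is responsible for polling it
// (e.g. by spawning it on the runtime) until it yields the framed object back.
fn send_hello<T>(framed: Framed<T, Codec>) -> SendResponse<T, Body>
where
    T: AsyncRead + AsyncWrite,
{
    SendResponse::new(framed, Response::Ok().body("hello"))
}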

actix-http/src/h2/dispatcher.rs (new file)
@@ -0,0 +1,325 @@
use std::collections::VecDeque;
use std::marker::PhantomData;
use std::time::Instant;
use std::{fmt, mem, net};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_server_config::IoStream;
use actix_service::Service;
use actix_utils::cloneable::CloneableService;
use bitflags::bitflags;
use bytes::{Bytes, BytesMut};
use futures::{try_ready, Async, Future, Poll, Sink, Stream};
use h2::server::{Connection, SendResponse};
use h2::{RecvStream, SendStream};
use http::header::{
HeaderValue, ACCEPT_ENCODING, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
};
use http::HttpTryFrom;
use log::{debug, error, trace};
use tokio_timer::Delay;
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use crate::config::ServiceConfig;
use crate::error::{DispatchError, Error, ParseError, PayloadError, ResponseError};
use crate::message::ResponseHead;
use crate::payload::Payload;
use crate::request::Request;
use crate::response::Response;
const CHUNK_SIZE: usize = 16_384;
/// Dispatcher for HTTP/2 protocol
pub struct Dispatcher<T: IoStream, S: Service<Request = Request>, B: MessageBody> {
service: CloneableService<S>,
connection: Connection<T, Bytes>,
config: ServiceConfig,
peer_addr: Option<net::SocketAddr>,
ka_expire: Instant,
ka_timer: Option<Delay>,
_t: PhantomData<B>,
}
impl<T, S, B> Dispatcher<T, S, B>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Future: 'static,
S::Response: Into<Response<B>>,
B: MessageBody + 'static,
{
pub fn new(
service: CloneableService<S>,
connection: Connection<T, Bytes>,
config: ServiceConfig,
timeout: Option<Delay>,
peer_addr: Option<net::SocketAddr>,
) -> Self {
// let keepalive = config.keep_alive_enabled();
// let flags = if keepalive {
// Flags::KEEPALIVE | Flags::KEEPALIVE_ENABLED
// } else {
// Flags::empty()
// };
// keep-alive timer
let (ka_expire, ka_timer) = if let Some(delay) = timeout {
(delay.deadline(), Some(delay))
} else if let Some(delay) = config.keep_alive_timer() {
(delay.deadline(), Some(delay))
} else {
(config.now(), None)
};
Dispatcher {
service,
config,
peer_addr,
connection,
ka_expire,
ka_timer,
_t: PhantomData,
}
}
}
impl<T, S, B> Future for Dispatcher<T, S, B>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Future: 'static,
S::Response: Into<Response<B>>,
B: MessageBody + 'static,
{
type Item = ();
type Error = DispatchError;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
match self.connection.poll()? {
Async::Ready(None) => return Ok(Async::Ready(())),
Async::Ready(Some((req, res))) => {
// update keep-alive expire
if self.ka_timer.is_some() {
if let Some(expire) = self.config.keep_alive_expire() {
self.ka_expire = expire;
}
}
let (parts, body) = req.into_parts();
let mut req = Request::with_payload(body.into());
let head = &mut req.head_mut();
head.uri = parts.uri;
head.method = parts.method;
head.version = parts.version;
head.headers = parts.headers.into();
head.peer_addr = self.peer_addr;
tokio_current_thread::spawn(ServiceResponse::<S::Future, B> {
state: ServiceResponseState::ServiceCall(
self.service.call(req),
Some(res),
),
config: self.config.clone(),
buffer: None,
})
}
Async::NotReady => return Ok(Async::NotReady),
}
}
}
}
struct ServiceResponse<F, B> {
state: ServiceResponseState<F, B>,
config: ServiceConfig,
buffer: Option<Bytes>,
}
enum ServiceResponseState<F, B> {
ServiceCall(F, Option<SendResponse<Bytes>>),
SendPayload(SendStream<Bytes>, ResponseBody<B>),
}
impl<F, B> ServiceResponse<F, B>
where
F: Future,
F::Error: Into<Error>,
F::Item: Into<Response<B>>,
B: MessageBody + 'static,
{
fn prepare_response(
&self,
head: &ResponseHead,
size: &mut BodySize,
) -> http::Response<()> {
let mut has_date = false;
let mut skip_len = size != &BodySize::Stream;
let mut res = http::Response::new(());
*res.status_mut() = head.status;
*res.version_mut() = http::Version::HTTP_2;
// Content length
match head.status {
http::StatusCode::NO_CONTENT
| http::StatusCode::CONTINUE
| http::StatusCode::PROCESSING => *size = BodySize::None,
http::StatusCode::SWITCHING_PROTOCOLS => {
skip_len = true;
*size = BodySize::Stream;
}
_ => (),
}
let _ = match size {
BodySize::None | BodySize::Stream => None,
BodySize::Empty => res
.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0")),
BodySize::Sized(len) => res.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
BodySize::Sized64(len) => res.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
};
// copy headers
for (key, value) in head.headers.iter() {
match *key {
CONNECTION | TRANSFER_ENCODING => continue, // http2 specific
CONTENT_LENGTH if skip_len => continue,
DATE => has_date = true,
_ => (),
}
res.headers_mut().append(key, value.clone());
}
// set date header
if !has_date {
let mut bytes = BytesMut::with_capacity(29);
self.config.set_date_header(&mut bytes);
res.headers_mut()
.insert(DATE, HeaderValue::try_from(bytes.freeze()).unwrap());
}
res
}
}
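// Illustrative sketch, not part of the original diff: once the status-code
// overrides in `prepare_response` have run, the Content-Length decision above
// reduces to a simple rule. `content_length_for` is a hypothetical standalone
// rendering of that rule.
#[allow(dead_code)]
fn content_length_for(size: &BodySize) -> Option<String> {
    match size {
        // streaming and "no body" responses carry no Content-Length header
        BodySize::None | BodySize::Stream => None,
        // an empty body still advertises an explicit zero length
        BodySize::Empty => Some("0".to_owned()),
        // sized bodies advertise their exact length
        BodySize::Sized(len) => Some(len.to_string()),
        BodySize::Sized64(len) => Some(len.to_string()),
    }
}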
impl<F, B> Future for ServiceResponse<F, B>
where
F: Future,
F::Error: Into<Error>,
F::Item: Into<Response<B>>,
B: MessageBody + 'static,
{
type Item = ();
type Error = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.state {
ServiceResponseState::ServiceCall(ref mut call, ref mut send) => {
match call.poll() {
Ok(Async::Ready(res)) => {
let (res, body) = res.into().replace_body(());
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res = self.prepare_response(res.head(), &mut size);
let stream =
send.send_response(h2_res, size.is_eof()).map_err(|e| {
trace!("Error sending h2 response: {:?}", e);
})?;
if size.is_eof() {
Ok(Async::Ready(()))
} else {
self.state = ServiceResponseState::SendPayload(stream, body);
self.poll()
}
}
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(_e) => {
let res: Response = Response::InternalServerError().finish();
let (res, body) = res.replace_body(());
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res = self.prepare_response(res.head(), &mut size);
let stream =
send.send_response(h2_res, size.is_eof()).map_err(|e| {
trace!("Error sending h2 response: {:?}", e);
})?;
if size.is_eof() {
Ok(Async::Ready(()))
} else {
self.state = ServiceResponseState::SendPayload(
stream,
body.into_body(),
);
self.poll()
}
}
}
}
ServiceResponseState::SendPayload(ref mut stream, ref mut body) => loop {
loop {
if let Some(ref mut buffer) = self.buffer {
match stream.poll_capacity().map_err(|e| warn!("{:?}", e))? {
Async::NotReady => return Ok(Async::NotReady),
Async::Ready(None) => return Ok(Async::Ready(())),
Async::Ready(Some(cap)) => {
let len = buffer.len();
let bytes = buffer.split_to(std::cmp::min(cap, len));
if let Err(e) = stream.send_data(bytes, false) {
warn!("{:?}", e);
return Err(());
} else if !buffer.is_empty() {
let cap = std::cmp::min(buffer.len(), CHUNK_SIZE);
stream.reserve_capacity(cap);
} else {
self.buffer.take();
}
}
}
} else {
match body.poll_next() {
Ok(Async::NotReady) => {
return Ok(Async::NotReady);
}
Ok(Async::Ready(None)) => {
if let Err(e) = stream.send_data(Bytes::new(), true) {
warn!("{:?}", e);
return Err(());
} else {
return Ok(Async::Ready(()));
}
}
Ok(Async::Ready(Some(chunk))) => {
stream.reserve_capacity(std::cmp::min(
chunk.len(),
CHUNK_SIZE,
));
self.buffer = Some(chunk);
}
Err(e) => {
error!("Response payload stream error: {:?}", e);
return Err(());
}
}
}
}
},
}
}
}

actix-http/src/h2/mod.rs Normal file
View File

@ -0,0 +1,46 @@
#![allow(dead_code, unused_imports)]
use std::fmt;
use bytes::Bytes;
use futures::{Async, Poll, Stream};
use h2::RecvStream;
mod dispatcher;
mod service;
pub use self::dispatcher::Dispatcher;
pub use self::service::H2Service;
use crate::error::PayloadError;
/// H2 receive stream
pub struct Payload {
pl: RecvStream,
}
impl Payload {
pub(crate) fn new(pl: RecvStream) -> Self {
Self { pl }
}
}
impl Stream for Payload {
type Item = Bytes;
type Error = PayloadError;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
match self.pl.poll() {
Ok(Async::Ready(Some(chunk))) => {
let len = chunk.len();
if let Err(err) = self.pl.release_capacity().release_capacity(len) {
Err(err.into())
} else {
Ok(Async::Ready(Some(chunk)))
}
}
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(err) => Err(err.into()),
}
}
}
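// Illustrative sketch, not part of the original diff: the payload can be
// consumed like any other `Stream<Item = Bytes, Error = PayloadError>`;
// connection-level flow control is handled internally because each yielded
// chunk also releases the matching capacity back to the peer. `collect_body`
// is a hypothetical helper.
#[allow(dead_code)]
fn collect_body(pl: Payload) -> impl futures::Future<Item = bytes::BytesMut, Error = PayloadError> {
    pl.fold(bytes::BytesMut::new(), |mut acc, chunk| {
        // accumulate the chunks into a single contiguous buffer
        acc.extend_from_slice(&chunk);
        Ok::<_, PayloadError>(acc)
    })
}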

View File

@ -0,0 +1,241 @@
use std::fmt::Debug;
use std::marker::PhantomData;
use std::{io, net};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_server_config::{Io, IoStream, ServerConfig as SrvConfig};
use actix_service::{IntoNewService, NewService, Service};
use actix_utils::cloneable::CloneableService;
use bytes::Bytes;
use futures::future::{ok, FutureResult};
use futures::{try_ready, Async, Future, IntoFuture, Poll, Stream};
use h2::server::{self, Connection, Handshake};
use h2::RecvStream;
use log::{error, trace};
use crate::body::MessageBody;
use crate::config::{KeepAlive, ServiceConfig};
use crate::error::{DispatchError, Error, ParseError, ResponseError};
use crate::payload::Payload;
use crate::request::Request;
use crate::response::Response;
use super::dispatcher::Dispatcher;
/// `NewService` implementation for HTTP2 transport
pub struct H2Service<T, P, S, B> {
srv: S,
cfg: ServiceConfig,
_t: PhantomData<(T, P, B)>,
}
impl<T, P, S, B> H2Service<T, P, S, B>
where
S: NewService<SrvConfig, Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
/// Create new `HttpService` instance.
pub fn new<F: IntoNewService<S, SrvConfig>>(service: F) -> Self {
let cfg = ServiceConfig::new(KeepAlive::Timeout(5), 5000, 0);
H2Service {
cfg,
srv: service.into_new_service(),
_t: PhantomData,
}
}
/// Create new `HttpService` instance with config.
pub fn with_config<F: IntoNewService<S, SrvConfig>>(
cfg: ServiceConfig,
service: F,
) -> Self {
H2Service {
cfg,
srv: service.into_new_service(),
_t: PhantomData,
}
}
}
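// Illustrative sketch, not part of the original diff: wiring an already built
// inner service into an `H2Service` with an explicit keep-alive configuration.
// `with_keepalive` is a hypothetical helper; it relies on the reflexive
// `IntoNewService` impl that actix-service is assumed to provide for any
// `NewService`, and its bounds simply restate the ones required above.
#[allow(dead_code)]
fn with_keepalive<T, S, B>(inner: S) -> H2Service<T, (), S, B>
where
    T: IoStream,
    S: NewService<SrvConfig, Request = Request>,
    S::Error: Into<Error>,
    S::Response: Into<Response<B>>,
    <S::Service as Service>::Future: 'static,
    B: MessageBody + 'static,
{
    // 30 second keep-alive, 5000 ms client timeout, no disconnect timeout,
    // mirroring the units used by `ServiceConfig::new` in `new()` above
    let cfg = ServiceConfig::new(KeepAlive::Timeout(30), 5000, 0);
    H2Service::with_config(cfg, inner)
}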
impl<T, P, S, B> NewService<SrvConfig> for H2Service<T, P, S, B>
where
T: IoStream,
S: NewService<SrvConfig, Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
type Request = Io<T, P>;
type Response = ();
type Error = DispatchError;
type InitError = S::InitError;
type Service = H2ServiceHandler<T, P, S::Service, B>;
type Future = H2ServiceResponse<T, P, S, B>;
fn new_service(&self, cfg: &SrvConfig) -> Self::Future {
H2ServiceResponse {
fut: self.srv.new_service(cfg).into_future(),
cfg: Some(self.cfg.clone()),
_t: PhantomData,
}
}
}
#[doc(hidden)]
pub struct H2ServiceResponse<T, P, S: NewService<SrvConfig, Request = Request>, B> {
fut: <S::Future as IntoFuture>::Future,
cfg: Option<ServiceConfig>,
_t: PhantomData<(T, P, B)>,
}
impl<T, P, S, B> Future for H2ServiceResponse<T, P, S, B>
where
T: IoStream,
S: NewService<SrvConfig, Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
type Item = H2ServiceHandler<T, P, S::Service, B>;
type Error = S::InitError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let service = try_ready!(self.fut.poll());
Ok(Async::Ready(H2ServiceHandler::new(
self.cfg.take().unwrap(),
service,
)))
}
}
/// `Service` implementation for http/2 transport
pub struct H2ServiceHandler<T, P, S, B> {
srv: CloneableService<S>,
cfg: ServiceConfig,
_t: PhantomData<(T, P, B)>,
}
impl<T, P, S, B> H2ServiceHandler<T, P, S, B>
where
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Future: 'static,
S::Response: Into<Response<B>>,
B: MessageBody + 'static,
{
fn new(cfg: ServiceConfig, srv: S) -> H2ServiceHandler<T, P, S, B> {
H2ServiceHandler {
cfg,
srv: CloneableService::new(srv),
_t: PhantomData,
}
}
}
impl<T, P, S, B> Service for H2ServiceHandler<T, P, S, B>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Future: 'static,
S::Response: Into<Response<B>>,
B: MessageBody + 'static,
{
type Request = Io<T, P>;
type Response = ();
type Error = DispatchError;
type Future = H2ServiceHandlerResponse<T, S, B>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.srv.poll_ready().map_err(|e| {
let e = e.into();
error!("Service readiness error: {:?}", e);
DispatchError::Service(e)
})
}
fn call(&mut self, req: Self::Request) -> Self::Future {
let io = req.into_parts().0;
let peer_addr = io.peer_addr();
H2ServiceHandlerResponse {
state: State::Handshake(
Some(self.srv.clone()),
Some(self.cfg.clone()),
peer_addr,
server::handshake(io),
),
}
}
}
enum State<T: IoStream, S: Service<Request = Request>, B: MessageBody>
where
S::Future: 'static,
{
Incoming(Dispatcher<T, S, B>),
Handshake(
Option<CloneableService<S>>,
Option<ServiceConfig>,
Option<net::SocketAddr>,
Handshake<T, Bytes>,
),
}
pub struct H2ServiceHandlerResponse<T, S, B>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Future: 'static,
S::Response: Into<Response<B>>,
B: MessageBody + 'static,
{
state: State<T, S, B>,
}
impl<T, S, B> Future for H2ServiceHandlerResponse<T, S, B>
where
T: IoStream,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Future: 'static,
S::Response: Into<Response<B>>,
B: MessageBody,
{
type Item = ();
type Error = DispatchError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.state {
State::Incoming(ref mut disp) => disp.poll(),
State::Handshake(
ref mut srv,
ref mut config,
ref peer_addr,
ref mut handshake,
) => match handshake.poll() {
Ok(Async::Ready(conn)) => {
self.state = State::Incoming(Dispatcher::new(
srv.take().unwrap(),
conn,
config.take().unwrap(),
None,
peer_addr.clone(),
));
self.poll()
}
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(err) => {
trace!("H2 handshake error: {}", err);
Err(err.into())
}
},
}
}
}

View File

@ -1,6 +1,7 @@
use header::{qitem, QualityItem};
use http::header as http;
use mime::{self, Mime};
use mime::Mime;
use crate::header::{qitem, QualityItem};
use crate::http::header;
header! {
/// `Accept` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.3.2)
@ -30,13 +31,13 @@ header! {
///
/// # Examples
/// ```rust
/// # extern crate actix_web;
/// # extern crate actix_http;
/// extern crate mime;
/// use actix_web::HttpResponse;
/// use actix_web::http::header::{Accept, qitem};
/// use actix_http::Response;
/// use actix_http::http::header::{Accept, qitem};
///
/// # fn main() {
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
///
/// builder.set(
/// Accept(vec![
@ -47,13 +48,13 @@ header! {
/// ```
///
/// ```rust
/// # extern crate actix_web;
/// # extern crate actix_http;
/// extern crate mime;
/// use actix_web::HttpResponse;
/// use actix_web::http::header::{Accept, qitem};
/// use actix_http::Response;
/// use actix_http::http::header::{Accept, qitem};
///
/// # fn main() {
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
///
/// builder.set(
/// Accept(vec![
@ -64,13 +65,13 @@ header! {
/// ```
///
/// ```rust
/// # extern crate actix_web;
/// # extern crate actix_http;
/// extern crate mime;
/// use actix_web::HttpResponse;
/// use actix_web::http::header::{Accept, QualityItem, q, qitem};
/// use actix_http::Response;
/// use actix_http::http::header::{Accept, QualityItem, q, qitem};
///
/// # fn main() {
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
///
/// builder.set(
/// Accept(vec![
@ -89,7 +90,7 @@ header! {
/// );
/// # }
/// ```
(Accept, http::ACCEPT) => (QualityItem<Mime>)+
(Accept, header::ACCEPT) => (QualityItem<Mime>)+
test_accept {
// Tests from the RFC
@ -104,8 +105,8 @@ header! {
test2,
vec![b"text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c"],
Some(HeaderField(vec![
QualityItem::new(TEXT_PLAIN, q(500)),
qitem(TEXT_HTML),
QualityItem::new(mime::TEXT_PLAIN, q(500)),
qitem(mime::TEXT_HTML),
QualityItem::new(
"text/x-dvi".parse().unwrap(),
q(800)),
@ -116,20 +117,20 @@ header! {
test3,
vec![b"text/plain; charset=utf-8"],
Some(Accept(vec![
qitem(TEXT_PLAIN_UTF_8),
qitem(mime::TEXT_PLAIN_UTF_8),
])));
test_header!(
test4,
vec![b"text/plain; charset=utf-8; q=0.5"],
Some(Accept(vec![
QualityItem::new(TEXT_PLAIN_UTF_8,
QualityItem::new(mime::TEXT_PLAIN_UTF_8,
q(500)),
])));
#[test]
fn test_fuzzing1() {
use test::TestRequest;
let req = TestRequest::with_header(super::http::ACCEPT, "chunk#;e").finish();
use crate::test::TestRequest;
let req = TestRequest::with_header(crate::header::ACCEPT, "chunk#;e").finish();
let header = Accept::parse(&req);
assert!(header.is_ok());
}

View File

@ -1,4 +1,4 @@
use header::{Charset, QualityItem, ACCEPT_CHARSET};
use crate::header::{Charset, QualityItem, ACCEPT_CHARSET};
header! {
/// `Accept-Charset` header, defined in
@ -22,24 +22,24 @@ header! {
///
/// # Examples
/// ```rust
/// # extern crate actix_web;
/// use actix_web::HttpResponse;
/// use actix_web::http::header::{AcceptCharset, Charset, qitem};
/// # extern crate actix_http;
/// use actix_http::Response;
/// use actix_http::http::header::{AcceptCharset, Charset, qitem};
///
/// # fn main() {
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(
/// AcceptCharset(vec![qitem(Charset::Us_Ascii)])
/// );
/// # }
/// ```
/// ```rust
/// # extern crate actix_web;
/// use actix_web::HttpResponse;
/// use actix_web::http::header::{AcceptCharset, Charset, q, QualityItem};
/// # extern crate actix_http;
/// use actix_http::Response;
/// use actix_http::http::header::{AcceptCharset, Charset, q, QualityItem};
///
/// # fn main() {
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(
/// AcceptCharset(vec![
/// QualityItem::new(Charset::Us_Ascii, q(900)),
@ -49,12 +49,12 @@ header! {
/// # }
/// ```
/// ```rust
/// # extern crate actix_web;
/// use actix_web::HttpResponse;
/// use actix_web::http::header::{AcceptCharset, Charset, qitem};
/// # extern crate actix_http;
/// use actix_http::Response;
/// use actix_http::http::header::{AcceptCharset, Charset, qitem};
///
/// # fn main() {
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(
/// AcceptCharset(vec![qitem(Charset::Ext("utf-8".to_owned()))])
/// );

View File

@ -1,4 +1,4 @@
use header::{QualityItem, ACCEPT_LANGUAGE};
use crate::header::{QualityItem, ACCEPT_LANGUAGE};
use language_tags::LanguageTag;
header! {
@ -23,13 +23,13 @@ header! {
/// # Examples
///
/// ```rust
/// # extern crate actix_web;
/// # extern crate actix_http;
/// # extern crate language_tags;
/// use actix_web::HttpResponse;
/// use actix_web::http::header::{AcceptLanguage, LanguageTag, qitem};
/// use actix_http::Response;
/// use actix_http::http::header::{AcceptLanguage, LanguageTag, qitem};
///
/// # fn main() {
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// let mut langtag: LanguageTag = Default::default();
/// langtag.language = Some("en".to_owned());
/// langtag.region = Some("US".to_owned());
@ -42,13 +42,13 @@ header! {
/// ```
///
/// ```rust
/// # extern crate actix_web;
/// # extern crate actix_http;
/// # #[macro_use] extern crate language_tags;
/// use actix_web::HttpResponse;
/// use actix_web::http::header::{AcceptLanguage, QualityItem, q, qitem};
/// use actix_http::Response;
/// use actix_http::http::header::{AcceptLanguage, QualityItem, q, qitem};
/// #
/// # fn main() {
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(
/// AcceptLanguage(vec![
/// qitem(langtag!(da)),

View File

@ -24,13 +24,13 @@ header! {
///
/// ```rust
/// # extern crate http;
/// # extern crate actix_web;
/// use actix_web::HttpResponse;
/// use actix_web::http::header::Allow;
/// # extern crate actix_http;
/// use actix_http::Response;
/// use actix_http::http::header::Allow;
/// use http::Method;
///
/// # fn main() {
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(
/// Allow(vec![Method::GET])
/// );
@ -39,13 +39,13 @@ header! {
///
/// ```rust
/// # extern crate http;
/// # extern crate actix_web;
/// use actix_web::HttpResponse;
/// use actix_web::http::header::Allow;
/// # extern crate actix_http;
/// use actix_http::Response;
/// use actix_http::http::header::Allow;
/// use http::Method;
///
/// # fn main() {
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(
/// Allow(vec![
/// Method::GET,

View File

@ -1,9 +1,12 @@
use header::{Header, IntoHeaderValue, Writer};
use header::{fmt_comma_delimited, from_comma_delimited};
use http::header;
use std::fmt::{self, Write};
use std::str::FromStr;
use http::header;
use crate::header::{
fmt_comma_delimited, from_comma_delimited, Header, IntoHeaderValue, Writer,
};
/// `Cache-Control` header, defined in [RFC7234](https://tools.ietf.org/html/rfc7234#section-5.2)
///
/// The `Cache-Control` header field is used to specify directives for
@ -26,18 +29,18 @@ use std::str::FromStr;
///
/// # Examples
/// ```rust
/// use actix_web::HttpResponse;
/// use actix_web::http::header::{CacheControl, CacheDirective};
/// use actix_http::Response;
/// use actix_http::http::header::{CacheControl, CacheDirective};
///
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(CacheControl(vec![CacheDirective::MaxAge(86400u32)]));
/// ```
///
/// ```rust
/// use actix_web::HttpResponse;
/// use actix_web::http::header::{CacheControl, CacheDirective};
/// use actix_http::Response;
/// use actix_http::http::header::{CacheControl, CacheDirective};
///
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(CacheControl(vec![
/// CacheDirective::NoCache,
/// CacheDirective::Private,
@ -57,15 +60,15 @@ impl Header for CacheControl {
}
#[inline]
fn parse<T>(msg: &T) -> Result<Self, ::error::ParseError>
fn parse<T>(msg: &T) -> Result<Self, crate::error::ParseError>
where
T: ::HttpMessage,
T: crate::HttpMessage,
{
let directives = from_comma_delimited(msg.headers().get_all(Self::name()))?;
let directives = from_comma_delimited(msg.headers().get_all(&Self::name()))?;
if !directives.is_empty() {
Ok(CacheControl(directives))
} else {
Err(::error::ParseError::Header)
Err(crate::error::ParseError::Header)
}
}
}
@ -144,7 +147,7 @@ impl fmt::Display for CacheDirective {
Extension(ref name, None) => &name[..],
Extension(ref name, Some(ref arg)) => {
return write!(f, "{}={}", name, arg)
return write!(f, "{}={}", name, arg);
}
},
f,
@ -188,8 +191,8 @@ impl FromStr for CacheDirective {
#[cfg(test)]
mod tests {
use super::*;
use header::Header;
use test::TestRequest;
use crate::header::Header;
use crate::test::TestRequest;
#[test]
fn test_parse_multiple_headers() {

View File

@ -0,0 +1,918 @@
// # References
//
// "The Content-Disposition Header Field" https://www.ietf.org/rfc/rfc2183.txt
// "The Content-Disposition Header Field in the Hypertext Transfer Protocol (HTTP)" https://www.ietf.org/rfc/rfc6266.txt
// "Returning Values from Forms: multipart/form-data" https://www.ietf.org/rfc/rfc7578.txt
// Browser conformance tests at: http://greenbytes.de/tech/tc2231/
// IANA assignment: http://www.iana.org/assignments/cont-disp/cont-disp.xhtml
use lazy_static::lazy_static;
use regex::Regex;
use std::fmt::{self, Write};
use crate::header::{self, ExtendedValue, Header, IntoHeaderValue, Writer};
/// Split at the index of the first `needle` if it exists or at the end.
fn split_once(haystack: &str, needle: char) -> (&str, &str) {
haystack.find(needle).map_or_else(
|| (haystack, ""),
|sc| {
let (first, last) = haystack.split_at(sc);
(first, last.split_at(1).1)
},
)
}
/// Split at the index of the first `needle` if it exists or at the end, trim the right of the
/// first part and the left of the last part.
fn split_once_and_trim(haystack: &str, needle: char) -> (&str, &str) {
let (first, last) = split_once(haystack, needle);
(first.trim_end(), last.trim_start())
}
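// Illustrative sketch, not part of the original diff: how the two helpers
// above behave on a typical parameter list. `split_helpers_illustration` is a
// hypothetical test name used only for this example.
#[cfg(test)]
#[allow(dead_code)]
fn split_helpers_illustration() {
    // the needle itself is dropped; the right half keeps its own leading space
    assert_eq!(split_once("a=1; b=2", ';'), ("a=1", " b=2"));
    // with trimming, both halves come back clean
    assert_eq!(split_once_and_trim("a=1 ; b=2", ';'), ("a=1", "b=2"));
    // a missing needle leaves everything in the first half
    assert_eq!(split_once_and_trim("inline", ';'), ("inline", ""));
}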
/// The implied disposition of the content of the HTTP body.
#[derive(Clone, Debug, PartialEq)]
pub enum DispositionType {
/// Inline implies default processing
Inline,
/// Attachment implies that the recipient should prompt the user to save the response locally,
/// rather than process it normally (as per its media type).
Attachment,
/// Used in *multipart/form-data* as defined in
/// [RFC7578](https://tools.ietf.org/html/rfc7578) to carry the field name and the file name.
FormData,
/// Extension type. Should be handled by recipients the same way as Attachment
Ext(String),
}
impl<'a> From<&'a str> for DispositionType {
fn from(origin: &'a str) -> DispositionType {
if origin.eq_ignore_ascii_case("inline") {
DispositionType::Inline
} else if origin.eq_ignore_ascii_case("attachment") {
DispositionType::Attachment
} else if origin.eq_ignore_ascii_case("form-data") {
DispositionType::FormData
} else {
DispositionType::Ext(origin.to_owned())
}
}
}
/// Parameter in [`ContentDisposition`].
///
/// # Examples
/// ```
/// use actix_http::http::header::DispositionParam;
///
/// let param = DispositionParam::Filename(String::from("sample.txt"));
/// assert!(param.is_filename());
/// assert_eq!(param.as_filename().unwrap(), "sample.txt");
/// ```
#[derive(Clone, Debug, PartialEq)]
pub enum DispositionParam {
/// For [`DispositionType::FormData`] (i.e. *multipart/form-data*), the name of a field from
/// the form.
Name(String),
/// A plain file name.
Filename(String),
/// An extended file name. It must not exist for `DispositionType::FormData` according to
/// [RFC7578 Section 4.2](https://tools.ietf.org/html/rfc7578#section-4.2).
FilenameExt(ExtendedValue),
/// An unrecognized regular parameter as defined in
/// [RFC5987](https://tools.ietf.org/html/rfc5987) as *reg-parameter*, in
/// [RFC6266](https://tools.ietf.org/html/rfc6266) as *token "=" value*. Recipients should
/// ignore unrecognizable parameters.
Unknown(String, String),
/// An unrecognized extended parameter as defined in
/// [RFC5987](https://tools.ietf.org/html/rfc5987) as *ext-parameter*, in
/// [RFC6266](https://tools.ietf.org/html/rfc6266) as *ext-token "=" ext-value*. The single
/// trailing asterisk is not included. Recipients should ignore unrecognizable parameters.
UnknownExt(String, ExtendedValue),
}
impl DispositionParam {
/// Returns `true` if the parameter is [`Name`](DispositionParam::Name).
#[inline]
pub fn is_name(&self) -> bool {
self.as_name().is_some()
}
/// Returns `true` if the parameter is [`Filename`](DispositionParam::Filename).
#[inline]
pub fn is_filename(&self) -> bool {
self.as_filename().is_some()
}
/// Returns `true` if the parameter is [`FilenameExt`](DispositionParam::FilenameExt).
#[inline]
pub fn is_filename_ext(&self) -> bool {
self.as_filename_ext().is_some()
}
/// Returns `true` if the parameter is [`Unknown`](DispositionParam::Unknown) and the `name`
/// matches.
#[inline]
pub fn is_unknown<T: AsRef<str>>(&self, name: T) -> bool {
self.as_unknown(name).is_some()
}
/// Returns `true` if the parameter is [`UnknownExt`](DispositionParam::UnknownExt) and the
/// `name` matches.
#[inline]
pub fn is_unknown_ext<T: AsRef<str>>(&self, name: T) -> bool {
self.as_unknown_ext(name).is_some()
}
/// Returns the name if applicable.
#[inline]
pub fn as_name(&self) -> Option<&str> {
match self {
DispositionParam::Name(ref name) => Some(name.as_str()),
_ => None,
}
}
/// Returns the filename if applicable.
#[inline]
pub fn as_filename(&self) -> Option<&str> {
match self {
DispositionParam::Filename(ref filename) => Some(filename.as_str()),
_ => None,
}
}
/// Returns the filename* if applicable.
#[inline]
pub fn as_filename_ext(&self) -> Option<&ExtendedValue> {
match self {
DispositionParam::FilenameExt(ref value) => Some(value),
_ => None,
}
}
/// Returns the value of the unrecognized regular parameter if it is
/// [`Unknown`](DispositionParam::Unknown) and the `name` matches.
#[inline]
pub fn as_unknown<T: AsRef<str>>(&self, name: T) -> Option<&str> {
match self {
DispositionParam::Unknown(ref ext_name, ref value)
if ext_name.eq_ignore_ascii_case(name.as_ref()) =>
{
Some(value.as_str())
}
_ => None,
}
}
/// Returns the value of the unrecognized extended parameter if it is
/// [`Unknown`](DispositionParam::Unknown) and the `name` matches.
#[inline]
pub fn as_unknown_ext<T: AsRef<str>>(&self, name: T) -> Option<&ExtendedValue> {
match self {
DispositionParam::UnknownExt(ref ext_name, ref value)
if ext_name.eq_ignore_ascii_case(name.as_ref()) =>
{
Some(value)
}
_ => None,
}
}
}
/// A *Content-Disposition* header. It can be used either as
/// [a response header for the main body](https://mdn.io/Content-Disposition#As_a_response_header_for_the_main_body)
/// as (re)defined in [RFC6266](https://tools.ietf.org/html/rfc6266), or as
/// [a header for a multipart body](https://mdn.io/Content-Disposition#As_a_header_for_a_multipart_body)
/// as (re)defined in [RFC7578](https://tools.ietf.org/html/rfc7578).
///
/// In a regular HTTP response, the *Content-Disposition* response header is a header indicating if
/// the content is expected to be displayed *inline* in the browser, that is, as a Web page or as
/// part of a Web page, or as an attachment, that is downloaded and saved locally, and also can be
/// used to attach additional metadata, such as the filename to use when saving the response payload
/// locally.
///
/// In a *multipart/form-data* body, the HTTP *Content-Disposition* general header is a header that
/// can be used on the subpart of a multipart body to give information about the field it applies to.
/// The subpart is delimited by the boundary defined in the *Content-Type* header. Used on the body
/// itself, *Content-Disposition* has no effect.
///
/// # ABNF
/// ```text
/// content-disposition = "Content-Disposition" ":"
/// disposition-type *( ";" disposition-parm )
///
/// disposition-type = "inline" | "attachment" | disp-ext-type
/// ; case-insensitive
///
/// disp-ext-type = token
///
/// disposition-parm = filename-parm | disp-ext-parm
///
/// filename-parm = "filename" "=" value
/// | "filename*" "=" ext-value
///
/// disp-ext-parm = token "=" value
/// | ext-token "=" ext-value
///
/// ext-token = <the characters in token, followed by "*">
/// ```
///
/// **Note**: filename* [must not](https://tools.ietf.org/html/rfc7578#section-4.2) be used within
/// *multipart/form-data*.
///
/// # Example
///
/// ```
/// use actix_http::http::header::{
/// Charset, ContentDisposition, DispositionParam, DispositionType,
/// ExtendedValue,
/// };
///
/// let cd1 = ContentDisposition {
/// disposition: DispositionType::Attachment,
/// parameters: vec![DispositionParam::FilenameExt(ExtendedValue {
/// charset: Charset::Iso_8859_1, // The character set for the bytes of the filename
/// language_tag: None, // The optional language tag (see `language-tag` crate)
/// value: b"\xa9 Copyright 1989.txt".to_vec(), // the actual bytes of the filename
/// })],
/// };
/// assert!(cd1.is_attachment());
/// assert!(cd1.get_filename_ext().is_some());
///
/// let cd2 = ContentDisposition {
/// disposition: DispositionType::FormData,
/// parameters: vec![
/// DispositionParam::Name(String::from("file")),
/// DispositionParam::Filename(String::from("bill.odt")),
/// ],
/// };
/// assert_eq!(cd2.get_name(), Some("file")); // field name
/// assert_eq!(cd2.get_filename(), Some("bill.odt"));
/// ```
///
/// # WARN
/// If "filename" parameter is supplied, do not use the file name blindly, check and possibly
/// change to match local file system conventions if applicable, and do not use directory path
/// information that may be present. See [RFC2183](https://tools.ietf.org/html/rfc2183#section-2.3).
#[derive(Clone, Debug, PartialEq)]
pub struct ContentDisposition {
/// The disposition type
pub disposition: DispositionType,
/// Disposition parameters
pub parameters: Vec<DispositionParam>,
}
impl ContentDisposition {
/// Parse a raw Content-Disposition header value.
pub fn from_raw(hv: &header::HeaderValue) -> Result<Self, crate::error::ParseError> {
// `header::from_one_raw_str` invokes `hv.to_str` which assumes `hv` contains only visible
// ASCII characters. So `hv.as_bytes` is necessary here.
let hv = String::from_utf8(hv.as_bytes().to_vec())
.map_err(|_| crate::error::ParseError::Header)?;
let (disp_type, mut left) = split_once_and_trim(hv.as_str().trim(), ';');
if disp_type.is_empty() {
return Err(crate::error::ParseError::Header);
}
let mut cd = ContentDisposition {
disposition: disp_type.into(),
parameters: Vec::new(),
};
while !left.is_empty() {
let (param_name, new_left) = split_once_and_trim(left, '=');
if param_name.is_empty() || param_name == "*" || new_left.is_empty() {
return Err(crate::error::ParseError::Header);
}
left = new_left;
if param_name.ends_with('*') {
// extended parameters
let param_name = &param_name[..param_name.len() - 1]; // trim asterisk
let (ext_value, new_left) = split_once_and_trim(left, ';');
left = new_left;
let ext_value = header::parse_extended_value(ext_value)?;
let param = if param_name.eq_ignore_ascii_case("filename") {
DispositionParam::FilenameExt(ext_value)
} else {
DispositionParam::UnknownExt(param_name.to_owned(), ext_value)
};
cd.parameters.push(param);
} else {
// regular parameters
let value = if left.starts_with('\"') {
// quoted-string: defined in RFC6266 -> RFC2616 Section 3.6
let mut escaping = false;
let mut quoted_string = vec![];
let mut end = None;
// search for closing quote
for (i, &c) in left.as_bytes().iter().skip(1).enumerate() {
if escaping {
escaping = false;
quoted_string.push(c);
} else if c == 0x5c {
// backslash
escaping = true;
} else if c == 0x22 {
// double quote
end = Some(i + 1); // plus one because the leading quote was skipped
break;
} else {
quoted_string.push(c);
}
}
left = &left[end.ok_or(crate::error::ParseError::Header)? + 1..];
left = split_once(left, ';').1.trim_start();
// In fact, it should not be Err if the above code is correct.
String::from_utf8(quoted_string)
.map_err(|_| crate::error::ParseError::Header)?
} else {
// token: won't contain a semicolon according to RFC 2616 Section 2.2
let (token, new_left) = split_once_and_trim(left, ';');
left = new_left;
token.to_owned()
};
if value.is_empty() {
return Err(crate::error::ParseError::Header);
}
let param = if param_name.eq_ignore_ascii_case("name") {
DispositionParam::Name(value)
} else if param_name.eq_ignore_ascii_case("filename") {
DispositionParam::Filename(value)
} else {
DispositionParam::Unknown(param_name.to_owned(), value)
};
cd.parameters.push(param);
}
}
Ok(cd)
}
/// Returns `true` if it is [`Inline`](DispositionType::Inline).
pub fn is_inline(&self) -> bool {
match self.disposition {
DispositionType::Inline => true,
_ => false,
}
}
/// Returns `true` if it is [`Attachment`](DispositionType::Attachment).
pub fn is_attachment(&self) -> bool {
match self.disposition {
DispositionType::Attachment => true,
_ => false,
}
}
/// Returns `true` if it is [`FormData`](DispositionType::FormData).
pub fn is_form_data(&self) -> bool {
match self.disposition {
DispositionType::FormData => true,
_ => false,
}
}
/// Returns `true` if it is [`Ext`](DispositionType::Ext) and the `disp_type` matches.
pub fn is_ext<T: AsRef<str>>(&self, disp_type: T) -> bool {
match self.disposition {
DispositionType::Ext(ref t)
if t.eq_ignore_ascii_case(disp_type.as_ref()) =>
{
true
}
_ => false,
}
}
/// Return the value of *name* if it exists.
pub fn get_name(&self) -> Option<&str> {
self.parameters.iter().filter_map(|p| p.as_name()).nth(0)
}
/// Return the value of *filename* if it exists.
pub fn get_filename(&self) -> Option<&str> {
self.parameters
.iter()
.filter_map(|p| p.as_filename())
.nth(0)
}
/// Return the value of *filename\** if it exists.
pub fn get_filename_ext(&self) -> Option<&ExtendedValue> {
self.parameters
.iter()
.filter_map(|p| p.as_filename_ext())
.nth(0)
}
/// Return the value of the parameter whose `name` matches, if it exists.
pub fn get_unknown<T: AsRef<str>>(&self, name: T) -> Option<&str> {
let name = name.as_ref();
self.parameters
.iter()
.filter_map(|p| p.as_unknown(name))
.nth(0)
}
/// Return the value of the extended parameter whose `name` matches, if it exists.
pub fn get_unknown_ext<T: AsRef<str>>(&self, name: T) -> Option<&ExtendedValue> {
let name = name.as_ref();
self.parameters
.iter()
.filter_map(|p| p.as_unknown_ext(name))
.nth(0)
}
}
impl IntoHeaderValue for ContentDisposition {
type Error = header::InvalidHeaderValueBytes;
fn try_into(self) -> Result<header::HeaderValue, Self::Error> {
let mut writer = Writer::new();
let _ = write!(&mut writer, "{}", self);
header::HeaderValue::from_shared(writer.take())
}
}
impl Header for ContentDisposition {
fn name() -> header::HeaderName {
header::CONTENT_DISPOSITION
}
fn parse<T: crate::HttpMessage>(msg: &T) -> Result<Self, crate::error::ParseError> {
if let Some(h) = msg.headers().get(&Self::name()) {
Self::from_raw(&h)
} else {
Err(crate::error::ParseError::Header)
}
}
}
impl fmt::Display for DispositionType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match self {
DispositionType::Inline => write!(f, "inline"),
DispositionType::Attachment => write!(f, "attachment"),
DispositionType::FormData => write!(f, "form-data"),
DispositionType::Ext(ref s) => write!(f, "{}", s),
}
}
}
impl fmt::Display for DispositionParam {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// All ASCII control characters (0-30, 127) except horizontal tab, double quote, and
// backslash should be escaped in quoted-string (i.e. "foobar").
// Ref: RFC6266 S4.1 -> RFC2616 S2.2; RFC 7578 S4.2 -> RFC2183 S2 -> ... .
lazy_static! {
static ref RE: Regex = Regex::new("[\x01-\x08\x10\x1F\x7F\"\\\\]").unwrap();
}
match self {
DispositionParam::Name(ref value) => write!(f, "name={}", value),
DispositionParam::Filename(ref value) => {
write!(f, "filename=\"{}\"", RE.replace_all(value, "\\$0").as_ref())
}
DispositionParam::Unknown(ref name, ref value) => write!(
f,
"{}=\"{}\"",
name,
&RE.replace_all(value, "\\$0").as_ref()
),
DispositionParam::FilenameExt(ref ext_value) => {
write!(f, "filename*={}", ext_value)
}
DispositionParam::UnknownExt(ref name, ref ext_value) => {
write!(f, "{}*={}", name, ext_value)
}
}
}
}
impl fmt::Display for ContentDisposition {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{}", self.disposition)?;
self.parameters
.iter()
.map(|param| write!(f, "; {}", param))
.collect()
}
}
#[cfg(test)]
mod tests {
use super::{ContentDisposition, DispositionParam, DispositionType};
use crate::header::shared::Charset;
use crate::header::{ExtendedValue, HeaderValue};
#[test]
fn test_from_raw_basic() {
assert!(ContentDisposition::from_raw(&HeaderValue::from_static("")).is_err());
let a = HeaderValue::from_static(
"form-data; dummy=3; name=upload; filename=\"sample.png\"",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Unknown("dummy".to_owned(), "3".to_owned()),
DispositionParam::Name("upload".to_owned()),
DispositionParam::Filename("sample.png".to_owned()),
],
};
assert_eq!(a, b);
let a = HeaderValue::from_static("attachment; filename=\"image.jpg\"");
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Attachment,
parameters: vec![DispositionParam::Filename("image.jpg".to_owned())],
};
assert_eq!(a, b);
let a = HeaderValue::from_static("inline; filename=image.jpg");
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Inline,
parameters: vec![DispositionParam::Filename("image.jpg".to_owned())],
};
assert_eq!(a, b);
let a = HeaderValue::from_static(
"attachment; creation-date=\"Wed, 12 Feb 1997 16:29:51 -0500\"",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Attachment,
parameters: vec![DispositionParam::Unknown(
String::from("creation-date"),
"Wed, 12 Feb 1997 16:29:51 -0500".to_owned(),
)],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_extended() {
let a = HeaderValue::from_static(
"attachment; filename*=UTF-8''%c2%a3%20and%20%e2%82%ac%20rates",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Attachment,
parameters: vec![DispositionParam::FilenameExt(ExtendedValue {
charset: Charset::Ext(String::from("UTF-8")),
language_tag: None,
value: vec![
0xc2, 0xa3, 0x20, b'a', b'n', b'd', 0x20, 0xe2, 0x82, 0xac, 0x20,
b'r', b'a', b't', b'e', b's',
],
})],
};
assert_eq!(a, b);
let a = HeaderValue::from_static(
"attachment; filename*=UTF-8''%c2%a3%20and%20%e2%82%ac%20rates",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Attachment,
parameters: vec![DispositionParam::FilenameExt(ExtendedValue {
charset: Charset::Ext(String::from("UTF-8")),
language_tag: None,
value: vec![
0xc2, 0xa3, 0x20, b'a', b'n', b'd', 0x20, 0xe2, 0x82, 0xac, 0x20,
b'r', b'a', b't', b'e', b's',
],
})],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_extra_whitespace() {
let a = HeaderValue::from_static(
"form-data ; du-mmy= 3 ; name =upload ; filename = \"sample.png\" ; ",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Unknown("du-mmy".to_owned(), "3".to_owned()),
DispositionParam::Name("upload".to_owned()),
DispositionParam::Filename("sample.png".to_owned()),
],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_unordered() {
let a = HeaderValue::from_static(
"form-data; dummy=3; filename=\"sample.png\" ; name=upload;",
// Actually, a trailing semicolon is not compliant, but it is fine to accept.
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Unknown("dummy".to_owned(), "3".to_owned()),
DispositionParam::Filename("sample.png".to_owned()),
DispositionParam::Name("upload".to_owned()),
],
};
assert_eq!(a, b);
let a = HeaderValue::from_str(
"attachment; filename*=iso-8859-1''foo-%E4.html; filename=\"foo-ä.html\"",
)
.unwrap();
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Attachment,
parameters: vec![
DispositionParam::FilenameExt(ExtendedValue {
charset: Charset::Iso_8859_1,
language_tag: None,
value: b"foo-\xe4.html".to_vec(),
}),
DispositionParam::Filename("foo-ä.html".to_owned()),
],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_only_disp() {
let a = ContentDisposition::from_raw(&HeaderValue::from_static("attachment"))
.unwrap();
let b = ContentDisposition {
disposition: DispositionType::Attachment,
parameters: vec![],
};
assert_eq!(a, b);
let a =
ContentDisposition::from_raw(&HeaderValue::from_static("inline ;")).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Inline,
parameters: vec![],
};
assert_eq!(a, b);
let a = ContentDisposition::from_raw(&HeaderValue::from_static(
"unknown-disp-param",
))
.unwrap();
let b = ContentDisposition {
disposition: DispositionType::Ext(String::from("unknown-disp-param")),
parameters: vec![],
};
assert_eq!(a, b);
}
#[test]
fn from_raw_with_mixed_case() {
let a = HeaderValue::from_str(
"InLInE; fIlenAME*=iso-8859-1''foo-%E4.html; filEName=\"foo-ä.html\"",
)
.unwrap();
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::Inline,
parameters: vec![
DispositionParam::FilenameExt(ExtendedValue {
charset: Charset::Iso_8859_1,
language_tag: None,
value: b"foo-\xe4.html".to_vec(),
}),
DispositionParam::Filename("foo-ä.html".to_owned()),
],
};
assert_eq!(a, b);
}
#[test]
fn from_raw_with_unicode() {
/* RFC7578 Section 4.2:
Some commonly deployed systems use multipart/form-data with file names directly encoded
including octets outside the US-ASCII range. The encoding used for the file names is
typically UTF-8, although HTML forms will use the charset associated with the form.
Mainstream browsers like Firefox (gecko) and Chrome use UTF-8 directly as above.
(And now, only UTF-8 is handled by this implementation.)
*/
let a =
HeaderValue::from_str("form-data; name=upload; filename=\"文件.webp\"")
.unwrap();
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Name(String::from("upload")),
DispositionParam::Filename(String::from("文件.webp")),
],
};
assert_eq!(a, b);
let a =
HeaderValue::from_str("form-data; name=upload; filename=\"余固知謇謇之為患兮,忍而不能舍也.pptx\"").unwrap();
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Name(String::from("upload")),
DispositionParam::Filename(String::from(
"余固知謇謇之為患兮,忍而不能舍也.pptx",
)),
],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_escape() {
let a = HeaderValue::from_static(
"form-data; dummy=3; name=upload; filename=\"s\\amp\\\"le.png\"",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Unknown("dummy".to_owned(), "3".to_owned()),
DispositionParam::Name("upload".to_owned()),
DispositionParam::Filename(
['s', 'a', 'm', 'p', '\"', 'l', 'e', '.', 'p', 'n', 'g']
.iter()
.collect(),
),
],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_semicolon() {
let a =
HeaderValue::from_static("form-data; filename=\"A semicolon here;.pdf\"");
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![DispositionParam::Filename(String::from(
"A semicolon here;.pdf",
))],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_unnecessary_percent_decode() {
let a = HeaderValue::from_static(
"form-data; name=photo; filename=\"%74%65%73%74%2e%70%6e%67\"", // Should not be decoded!
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Name("photo".to_owned()),
DispositionParam::Filename(String::from("%74%65%73%74%2e%70%6e%67")),
],
};
assert_eq!(a, b);
let a = HeaderValue::from_static(
"form-data; name=photo; filename=\"%74%65%73%74.png\"",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Name("photo".to_owned()),
DispositionParam::Filename(String::from("%74%65%73%74.png")),
],
};
assert_eq!(a, b);
}
#[test]
fn test_from_raw_param_value_missing() {
let a = HeaderValue::from_static("form-data; name=upload ; filename=");
assert!(ContentDisposition::from_raw(&a).is_err());
let a = HeaderValue::from_static("attachment; dummy=; filename=invoice.pdf");
assert!(ContentDisposition::from_raw(&a).is_err());
let a = HeaderValue::from_static("inline; filename= ");
assert!(ContentDisposition::from_raw(&a).is_err());
}
#[test]
fn test_from_raw_param_name_missing() {
let a = HeaderValue::from_static("inline; =\"test.txt\"");
assert!(ContentDisposition::from_raw(&a).is_err());
let a = HeaderValue::from_static("inline; =diary.odt");
assert!(ContentDisposition::from_raw(&a).is_err());
let a = HeaderValue::from_static("inline; =");
assert!(ContentDisposition::from_raw(&a).is_err());
}
#[test]
fn test_display_extended() {
let as_string =
"attachment; filename*=UTF-8'en'%C2%A3%20and%20%E2%82%AC%20rates";
let a = HeaderValue::from_static(as_string);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let display_rendered = format!("{}", a);
assert_eq!(as_string, display_rendered);
let a = HeaderValue::from_static("attachment; filename=colourful.csv");
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let display_rendered = format!("{}", a);
assert_eq!(
"attachment; filename=\"colourful.csv\"".to_owned(),
display_rendered
);
}
#[test]
fn test_display_quote() {
let as_string = "form-data; name=upload; filename=\"Quote\\\"here.png\"";
as_string
.find(['\\', '\"'].iter().collect::<String>().as_str())
.unwrap(); // ensure `\"` is there
let a = HeaderValue::from_static(as_string);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let display_rendered = format!("{}", a);
assert_eq!(as_string, display_rendered);
}
#[test]
fn test_display_space_tab() {
let as_string = "form-data; name=upload; filename=\"Space here.png\"";
let a = HeaderValue::from_static(as_string);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let display_rendered = format!("{}", a);
assert_eq!(as_string, display_rendered);
let a: ContentDisposition = ContentDisposition {
disposition: DispositionType::Inline,
parameters: vec![DispositionParam::Filename(String::from("Tab\there.png"))],
};
let display_rendered = format!("{}", a);
assert_eq!("inline; filename=\"Tab\x09here.png\"", display_rendered);
}
#[test]
fn test_display_control_characters() {
/* let a = "attachment; filename=\"carriage\rreturn.png\"";
let a = HeaderValue::from_static(a);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let display_rendered = format!("{}", a);
assert_eq!(
"attachment; filename=\"carriage\\\rreturn.png\"",
display_rendered
);*/
// No way to create a HeaderValue containing a carriage return.
let a: ContentDisposition = ContentDisposition {
disposition: DispositionType::Inline,
parameters: vec![DispositionParam::Filename(String::from("bell\x07.png"))],
};
let display_rendered = format!("{}", a);
assert_eq!("inline; filename=\"bell\\\x07.png\"", display_rendered);
}
#[test]
fn test_param_methods() {
let param = DispositionParam::Filename(String::from("sample.txt"));
assert!(param.is_filename());
assert_eq!(param.as_filename().unwrap(), "sample.txt");
let param = DispositionParam::Unknown(String::from("foo"), String::from("bar"));
assert!(param.is_unknown("foo"));
assert_eq!(param.as_unknown("fOo"), Some("bar"));
}
#[test]
fn test_disposition_methods() {
let cd = ContentDisposition {
disposition: DispositionType::FormData,
parameters: vec![
DispositionParam::Unknown("dummy".to_owned(), "3".to_owned()),
DispositionParam::Name("upload".to_owned()),
DispositionParam::Filename("sample.png".to_owned()),
],
};
assert_eq!(cd.get_name(), Some("upload"));
assert_eq!(cd.get_unknown("dummy"), Some("3"));
assert_eq!(cd.get_filename(), Some("sample.png"));
assert_eq!(cd.get_unknown_ext("dummy"), None);
assert_eq!(cd.get_unknown("duMMy"), Some("3"));
}
}

View File

@ -1,4 +1,4 @@
use header::{QualityItem, CONTENT_LANGUAGE};
use crate::header::{QualityItem, CONTENT_LANGUAGE};
use language_tags::LanguageTag;
header! {
@ -24,13 +24,13 @@ header! {
/// # Examples
///
/// ```rust
/// # extern crate actix_web;
/// # extern crate actix_http;
/// # #[macro_use] extern crate language_tags;
/// use actix_web::HttpResponse;
/// # use actix_web::http::header::{ContentLanguage, qitem};
/// use actix_http::Response;
/// # use actix_http::http::header::{ContentLanguage, qitem};
/// #
/// # fn main() {
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(
/// ContentLanguage(vec![
/// qitem(langtag!(en)),
@ -40,14 +40,14 @@ header! {
/// ```
///
/// ```rust
/// # extern crate actix_web;
/// # extern crate actix_http;
/// # #[macro_use] extern crate language_tags;
/// use actix_web::HttpResponse;
/// # use actix_web::http::header::{ContentLanguage, qitem};
/// use actix_http::Response;
/// # use actix_http::http::header::{ContentLanguage, qitem};
/// #
/// # fn main() {
///
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(
/// ContentLanguage(vec![
/// qitem(langtag!(da)),

View File

@ -1,9 +1,11 @@
use error::ParseError;
use header::{HeaderValue, IntoHeaderValue, InvalidHeaderValueBytes, Writer,
CONTENT_RANGE};
use std::fmt::{self, Display, Write};
use std::str::FromStr;
use crate::error::ParseError;
use crate::header::{
HeaderValue, IntoHeaderValue, InvalidHeaderValueBytes, Writer, CONTENT_RANGE,
};
header! {
/// `Content-Range` header, defined in
/// [RFC7233](http://tools.ietf.org/html/rfc7233#section-4.2)
@ -131,9 +133,7 @@ impl FromStr for ContentRangeSpec {
let instance_length = if instance_length == "*" {
None
} else {
Some(instance_length
.parse()
.map_err(|_| ParseError::Header)?)
Some(instance_length.parse().map_err(|_| ParseError::Header)?)
};
let range = if range == "*" {
@ -141,7 +141,8 @@ impl FromStr for ContentRangeSpec {
} else {
let (first_byte, last_byte) =
split_in_two(range, '-').ok_or(ParseError::Header)?;
let first_byte = first_byte.parse().map_err(|_| ParseError::Header)?;
let first_byte =
first_byte.parse().map_err(|_| ParseError::Header)?;
let last_byte = last_byte.parse().map_err(|_| ParseError::Header)?;
if last_byte < first_byte {
return Err(ParseError::Header);
@ -187,10 +188,7 @@ impl Display for ContentRangeSpec {
f.write_str("*")
}
}
ContentRangeSpec::Unregistered {
ref unit,
ref resp,
} => {
ContentRangeSpec::Unregistered { ref unit, ref resp } => {
f.write_str(unit)?;
f.write_str(" ")?;
f.write_str(resp)

View File

@ -1,5 +1,5 @@
use header::CONTENT_TYPE;
use mime::{self, Mime};
use crate::header::CONTENT_TYPE;
use mime::Mime;
header! {
/// `Content-Type` header, defined in
@ -31,11 +31,11 @@ header! {
/// # Examples
///
/// ```rust
/// use actix_web::HttpResponse;
/// use actix_web::http::header::ContentType;
/// use actix_http::Response;
/// use actix_http::http::header::ContentType;
///
/// # fn main() {
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(
/// ContentType::json()
/// );
@ -44,13 +44,13 @@ header! {
///
/// ```rust
/// # extern crate mime;
/// # extern crate actix_web;
/// # extern crate actix_http;
/// use mime::TEXT_HTML;
/// use actix_web::HttpResponse;
/// use actix_web::http::header::ContentType;
/// use actix_http::Response;
/// use actix_http::http::header::ContentType;
///
/// # fn main() {
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(
/// ContentType(TEXT_HTML)
/// );
@ -62,7 +62,7 @@ header! {
test_header!(
test1,
vec![b"text/html"],
Some(HeaderField(TEXT_HTML)));
Some(HeaderField(mime::TEXT_HTML)));
}
}

View File

@ -1,4 +1,4 @@
use header::{HttpDate, DATE};
use crate::header::{HttpDate, DATE};
use std::time::SystemTime;
header! {
@ -20,11 +20,11 @@ header! {
/// # Example
///
/// ```rust
/// use actix_web::HttpResponse;
/// use actix_web::http::header::Date;
/// use actix_http::Response;
/// use actix_http::http::header::Date;
/// use std::time::SystemTime;
///
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(Date(SystemTime::now().into()));
/// ```
(Date, DATE) => [HttpDate]
@ -35,6 +35,7 @@ header! {
}
impl Date {
/// Create a date instance set to the current system time
pub fn now() -> Date {
Date(SystemTime::now().into())
}

View File

@ -1,4 +1,4 @@
use header::{EntityTag, ETAG};
use crate::header::{EntityTag, ETAG};
header! {
/// `ETag` header, defined in [RFC7232](http://tools.ietf.org/html/rfc7232#section-2.3)
@ -28,18 +28,18 @@ header! {
/// # Examples
///
/// ```rust
/// use actix_web::HttpResponse;
/// use actix_web::http::header::{ETag, EntityTag};
/// use actix_http::Response;
/// use actix_http::http::header::{ETag, EntityTag};
///
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(ETag(EntityTag::new(false, "xyzzy".to_owned())));
/// ```
///
/// ```rust
/// use actix_web::HttpResponse;
/// use actix_web::http::header::{ETag, EntityTag};
/// use actix_http::Response;
/// use actix_http::http::header::{ETag, EntityTag};
///
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(ETag(EntityTag::new(true, "xyzzy".to_owned())));
/// ```
(ETag, ETAG) => [EntityTag]

View File

@ -1,4 +1,4 @@
use header::{HttpDate, EXPIRES};
use crate::header::{HttpDate, EXPIRES};
header! {
/// `Expires` header, defined in [RFC7234](http://tools.ietf.org/html/rfc7234#section-5.3)
@ -22,11 +22,11 @@ header! {
/// # Example
///
/// ```rust
/// use actix_web::HttpResponse;
/// use actix_web::http::header::Expires;
/// use actix_http::Response;
/// use actix_http::http::header::Expires;
/// use std::time::{SystemTime, Duration};
///
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// let expiration = SystemTime::now() + Duration::from_secs(60 * 60 * 24);
/// builder.set(Expires(expiration.into()));
/// ```

View File

@ -1,4 +1,4 @@
use header::{EntityTag, IF_MATCH};
use crate::header::{EntityTag, IF_MATCH};
header! {
/// `If-Match` header, defined in
@ -30,18 +30,18 @@ header! {
/// # Examples
///
/// ```rust
/// use actix_web::HttpResponse;
/// use actix_web::http::header::IfMatch;
/// use actix_http::Response;
/// use actix_http::http::header::IfMatch;
///
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(IfMatch::Any);
/// ```
///
/// ```rust
/// use actix_web::HttpResponse;
/// use actix_web::http::header::{IfMatch, EntityTag};
/// use actix_http::Response;
/// use actix_http::http::header::{IfMatch, EntityTag};
///
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(
/// IfMatch::Items(vec![
/// EntityTag::new(false, "xyzzy".to_owned()),

View File

@ -1,4 +1,4 @@
use header::{HttpDate, IF_MODIFIED_SINCE};
use crate::header::{HttpDate, IF_MODIFIED_SINCE};
header! {
/// `If-Modified-Since` header, defined in
@ -22,11 +22,11 @@ header! {
/// # Example
///
/// ```rust
/// use actix_web::HttpResponse;
/// use actix_web::http::header::IfModifiedSince;
/// use actix_http::Response;
/// use actix_http::http::header::IfModifiedSince;
/// use std::time::{SystemTime, Duration};
///
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// let modified = SystemTime::now() - Duration::from_secs(60 * 60 * 24);
/// builder.set(IfModifiedSince(modified.into()));
/// ```

View File

@ -1,4 +1,4 @@
use header::{EntityTag, IF_NONE_MATCH};
use crate::header::{EntityTag, IF_NONE_MATCH};
header! {
/// `If-None-Match` header, defined in
@@ -32,18 +32,18 @@ header! {
/// # Examples
///
/// ```rust
/// use actix_web::HttpResponse;
/// use actix_web::http::header::IfNoneMatch;
/// use actix_http::Response;
/// use actix_http::http::header::IfNoneMatch;
///
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(IfNoneMatch::Any);
/// ```
///
/// ```rust
/// use actix_web::HttpResponse;
/// use actix_web::http::header::{IfNoneMatch, EntityTag};
/// use actix_http::Response;
/// use actix_http::http::header::{IfNoneMatch, EntityTag};
///
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(
/// IfNoneMatch::Items(vec![
/// EntityTag::new(false, "xyzzy".to_owned()),
@@ -66,8 +66,8 @@ header! {
#[cfg(test)]
mod tests {
use super::IfNoneMatch;
use header::{EntityTag, Header, IF_NONE_MATCH};
use test::TestRequest;
use crate::header::{EntityTag, Header, IF_NONE_MATCH};
use crate::test::TestRequest;
#[test]
fn test_if_none_match() {

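The test module above uses the crate-local `TestRequest`; a hedged sketch of what a parsing test for this header can look like (the `with_header`/`finish` helper names are assumptions based on the import above, not taken from the diff):

```rust
// Hedged sketch of a typed-header parsing test; helper names are assumed.
use actix_http::http::header::{Header, IfNoneMatch};
use actix_http::test::TestRequest;

#[test]
fn parses_wildcard_as_any() {
    let req = TestRequest::with_header("if-none-match", "*").finish();
    assert_eq!(IfNoneMatch::parse(&req).ok(), Some(IfNoneMatch::Any));
}
```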

@@ -1,11 +1,12 @@
use error::ParseError;
use header::from_one_raw_str;
use header::{EntityTag, Header, HeaderName, HeaderValue, HttpDate, IntoHeaderValue,
InvalidHeaderValueBytes, Writer};
use http::header;
use httpmessage::HttpMessage;
use std::fmt::{self, Display, Write};
use crate::error::ParseError;
use crate::header::{
self, from_one_raw_str, EntityTag, Header, HeaderName, HeaderValue, HttpDate,
IntoHeaderValue, InvalidHeaderValueBytes, Writer,
};
use crate::httpmessage::HttpMessage;
/// `If-Range` header, defined in [RFC7233](http://tools.ietf.org/html/rfc7233#section-3.2)
///
/// If a client has a partial copy of a representation and wishes to have
@@ -35,10 +36,10 @@ use std::fmt::{self, Display, Write};
/// # Examples
///
/// ```rust
/// use actix_web::HttpResponse;
/// use actix_web::http::header::{EntityTag, IfRange};
/// use actix_http::Response;
/// use actix_http::http::header::{EntityTag, IfRange};
///
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// builder.set(IfRange::EntityTag(EntityTag::new(
/// false,
/// "xyzzy".to_owned(),
@@ -46,11 +47,11 @@ use std::fmt::{self, Display, Write};
/// ```
///
/// ```rust
/// use actix_web::HttpResponse;
/// use actix_web::http::header::IfRange;
/// use actix_http::Response;
/// use actix_http::http::header::IfRange;
/// use std::time::{Duration, SystemTime};
///
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// let fetched = SystemTime::now() - Duration::from_secs(60 * 60 * 24);
/// builder.set(IfRange::Date(fetched.into()));
/// ```
@@ -72,12 +73,12 @@ impl Header for IfRange {
T: HttpMessage,
{
let etag: Result<EntityTag, _> =
from_one_raw_str(msg.headers().get(header::IF_RANGE));
from_one_raw_str(msg.headers().get(&header::IF_RANGE));
if let Ok(etag) = etag {
return Ok(IfRange::EntityTag(etag));
}
let date: Result<HttpDate, _> =
from_one_raw_str(msg.headers().get(header::IF_RANGE));
from_one_raw_str(msg.headers().get(&header::IF_RANGE));
if let Ok(date) = date {
return Ok(IfRange::Date(date));
}
@@ -107,7 +108,7 @@ impl IntoHeaderValue for IfRange {
#[cfg(test)]
mod test_if_range {
use super::IfRange as HeaderField;
use header::*;
use crate::header::*;
use std::str;
test_header!(test1, vec![b"Sat, 29 Oct 1994 19:43:31 GMT"]);
test_header!(test2, vec![b"\"xyzzy\""]);

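The `parse_header` change above keeps the existing fallback order; a hedged standalone sketch of that order (the `Parsed` enum and function are illustrative, while both header value types implement `FromStr` in the header module):

```rust
// Hedged sketch: try the raw If-Range value as an entity tag first, then as an
// HTTP date, mirroring the order used in parse_header above.
use actix_http::http::header::{EntityTag, HttpDate};

enum Parsed {
    EntityTag(EntityTag),
    Date(HttpDate),
}

fn parse_if_range(raw: &str) -> Option<Parsed> {
    if let Ok(etag) = raw.parse::<EntityTag>() {
        return Some(Parsed::EntityTag(etag));
    }
    if let Ok(date) = raw.parse::<HttpDate>() {
        return Some(Parsed::Date(date));
    }
    None
}
```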

@@ -1,4 +1,4 @@
use header::{HttpDate, IF_UNMODIFIED_SINCE};
use crate::header::{HttpDate, IF_UNMODIFIED_SINCE};
header! {
/// `If-Unmodified-Since` header, defined in
@@ -23,11 +23,11 @@ header! {
/// # Example
///
/// ```rust
/// use actix_web::HttpResponse;
/// use actix_web::http::header::IfUnmodifiedSince;
/// use actix_http::Response;
/// use actix_http::http::header::IfUnmodifiedSince;
/// use std::time::{SystemTime, Duration};
///
/// let mut builder = HttpResponse::Ok();
/// let mut builder = Response::Ok();
/// let modified = SystemTime::now() - Duration::from_secs(60 * 60 * 24);
/// builder.set(IfUnmodifiedSince(modified.into()));
/// ```
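For completeness, not part of the diff: a hedged sketch of the precondition check behind this header (the flow and helper name are illustrative).

```rust
// Hedged sketch: answer 412 Precondition Failed when the resource changed
// after the If-Unmodified-Since date supplied by the client.
use std::time::SystemTime;
use actix_http::http::header::{Header, IfUnmodifiedSince};
use actix_http::http::StatusCode;
use actix_http::{Request, Response};

fn check_precondition(req: &Request, last_changed: SystemTime) -> Option<Response> {
    if let Ok(IfUnmodifiedSince(since)) = IfUnmodifiedSince::parse(req) {
        if SystemTime::from(since) < last_changed {
            return Some(Response::build(StatusCode::PRECONDITION_FAILED).finish());
        }
    }
    None
}
```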

Some files were not shown because too many files have changed in this diff.