mirror of https://github.com/fafhrd91/actix-web synced 2025-07-03 09:36:36 +02:00

Compare commits

645 Commits

Author SHA1 Message Date
9668a2396f prepare actix-router release 0.5.0-rc.2 2022-01-21 17:21:46 +00:00
cb7347216c add Logger::log_target (#2594) 2022-01-21 17:19:17 +00:00
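A minimal sketch of how the new `Logger::log_target` method might be used; the target name below is illustrative, not taken from the PR:

```rust
use actix_web::{middleware::Logger, App};

fn main() {
    // Route access-log records to a custom `log` target instead of the default one.
    let logger = Logger::default().log_target("http_access");
    let _app = App::new().wrap(logger);
}
```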
ae7f71e317 remove ambiguous HttpResponseBuilder::del_cookie (#2591) 2022-01-21 17:18:07 +00:00
bc89f0bfc2 s/example/examples 2022-01-21 16:56:33 +00:00
c959916346 fmt codegen 2022-01-20 01:54:57 +00:00
f227e880d7 refactor route codegen to be cleaner 2022-01-20 01:53:02 +00:00
68ad81f989 remove debug logs 2022-01-20 01:30:33 +00:00
f2e736719a add url_for test for conflicting named resources 2022-01-20 01:30:33 +00:00
81ef12a0fd add warn log to from_parts if given request is cloned
closes #2562
2022-01-19 22:23:53 +00:00
1bc1538118 use tokio::main in client example 2022-01-19 21:36:14 +00:00
1cc3e7b24c deprecate Path::path (#2590) 2022-01-19 20:26:33 +00:00
3dd98c308c document Path::unprocessed panic 2022-01-19 18:33:23 +00:00
cb5d9a7e64 bump deps to stable actix-server v2 2022-01-19 16:58:11 +00:00
5ee555462f add HttpResponse::add_removal_cookie (#2586) 2022-01-19 16:36:11 +00:00
ad159f5219 fix ClientResponse::body doc
fixes #2589
2022-01-19 15:52:16 +00:00
2ffc21dd4f move response extensions out of head (#2585) 2022-01-19 02:09:25 +00:00
7b8a392ef5 allow camel case response headers (#2587) 2022-01-16 03:16:26 +00:00
3c7ccf5521 update http changelog 2022-01-15 15:43:18 +00:00
e7cae5a95b migrate to brotli crate (#2538) 2022-01-15 14:03:16 +00:00
455d5c460d prepare actix-files release 0.6.0-beta.14 2022-01-14 20:01:11 +00:00
8faca783fa prepare actix-web release 4.0.0-beta.20 2022-01-14 20:00:26 +00:00
edbb9b047e prepare actix-router release 0.5.0-rc.1 2022-01-14 19:59:36 +00:00
32742d0715 support opaque app in test helpers (#2584) 2022-01-14 19:45:32 +00:00
d90c1a2331 convert error in Result extractor (#2581)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-01-12 18:59:22 +00:00
2a12b41456 fix support for 12 extractors (#2582)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-01-12 18:31:48 +00:00
6c97d448b7 update tokio-uring to 0.2 (#2583) 2022-01-12 17:53:36 +00:00
c3ce33df05 unify generics across App, Scope and Resource (#2572)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-01-05 15:02:28 +00:00
4431c8da65 fix bench 2022-01-05 14:10:38 +00:00
2d11ab5977 Add ServiceConfig::configure (#1988)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-01-05 12:31:39 +00:00
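A rough sketch of the nested configuration that `ServiceConfig::configure` (#1988) enables; the function and route names here are illustrative:

```rust
use actix_web::{web, App, HttpResponse, Responder};

async fn ping() -> impl Responder {
    HttpResponse::Ok().finish()
}

// Illustrative nested config function.
fn api(cfg: &mut web::ServiceConfig) {
    cfg.route("/ping", web::get().to(ping));
}

fn root(cfg: &mut web::ServiceConfig) {
    // ServiceConfig::configure lets one config function delegate to another.
    cfg.configure(api);
}

fn main() {
    let _app = App::new().configure(root);
}
```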
4ebf16890d add GuardContext::header (#2569) 2022-01-05 11:47:14 +00:00
fe0bbfb3da optimize PathDeserializer (#2570) 2022-01-05 10:48:20 +00:00
2462b6dd5d generalize impl Responder for HttpResponse (#2567)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-01-05 04:42:52 +00:00
49cfabeaf5 simplify Resource trait (#2568)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-01-05 04:34:13 +00:00
0f7292c69a remove readme msrv link 2022-01-05 04:24:40 +00:00
8bbf2b5052 prepare actix-test release 0.1.0-beta.11 2022-01-04 15:37:48 +00:00
8c975bcc1f prepare actix-http-test release 3.0.0-beta.11 2022-01-04 15:37:33 +00:00
742ad56d30 prepare actix-web-actors release 4.0.0-beta.10 2022-01-04 15:37:14 +00:00
bcc8d5c441 prepare actix-multipart release 0.4.0-beta.12 2022-01-04 15:36:56 +00:00
f659098d21 prepare awc release 3.0.0-beta.18 2022-01-04 15:35:21 +00:00
8621ae12f8 prepare actix-web release 4.0.0-beta.19 2022-01-04 15:35:08 +00:00
b338eb8473 prepare actix-http release 3.0.0-beta.18 2022-01-04 15:34:52 +00:00
5abd1c2c2c prepare actix-web-codegen release 0.5.0-rc.1 2022-01-04 15:34:16 +00:00
05336269f9 prepare actix-router release 0.5.0-beta.4 2022-01-04 15:33:44 +00:00
86df295ee2 fully percent decode path segments when capturing (#2566) 2022-01-04 15:19:29 +00:00
85c9b1a263 move quoter 2022-01-04 12:58:40 +00:00
577597a80a rename on-connect example 2022-01-04 12:54:20 +00:00
374dc9bfc9 files: percent-decode url path (#2398)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-01-04 12:54:11 +00:00
93754f307f try path config from Data as well 2022-01-04 04:08:46 +00:00
c7639bc3be document quoter 2022-01-04 03:48:12 +00:00
0bc4ae9158 remove BodyEncoding trait (#2565) 2022-01-03 18:46:04 +00:00
19a46e3925 fix doc test 2022-01-03 15:35:47 +00:00
68cd853aa2 improve docs for Compress 2022-01-03 14:59:01 +00:00
25fe1bbaa5 add double compress layer test 2022-01-03 14:05:08 +00:00
e890307091 Fix AcceptEncoding header (#2501) 2022-01-03 13:17:57 +00:00
b708924590 only run nightly checks on master ci 2021-12-31 08:38:58 +00:00
5dcb250237 fix doc test 2021-12-31 07:53:53 +00:00
b4ff6addfe use match name if possible in data debug log 2021-12-30 07:15:57 +00:00
231a24ef8d improve application data docs 2021-12-30 07:11:35 +00:00
6df4974234 prepare awc release 3.0.0-beta.17 2021-12-29 10:17:28 +00:00
a80e93d6db prepare actix-web release 4.0.0-beta.18 2021-12-29 10:17:11 +00:00
542c92c9a7 tweak changelogs 2021-12-29 10:06:36 +00:00
74738c63a7 Upgrade time dependency (via cookie) (#2555) 2021-12-29 10:03:25 +00:00
a87e01f0d1 bump msrv to 1.54 2021-12-29 08:59:15 +00:00
9779010a5a prepare actix-files release 0.6.0-beta.12 2021-12-29 07:08:10 +00:00
11d50d792b prepare actix-web release 4.0.0-beta.17 2021-12-29 07:07:51 +00:00
798e9911e9 prepare awc release 3.0.0-beta.16 2021-12-29 07:07:46 +00:00
2b2de29800 never return port in realip_remote_addr (#2554) 2021-12-28 14:52:43 +00:00
0f5c876c6b tweak guard docs 2021-12-28 14:50:48 +00:00
96a4dc9dec use modern signatures for awc send_* and header methods (#2553) 2021-12-28 03:22:22 +00:00
4616ca8ee6 rework Guard trait (#2552) 2021-12-28 02:37:13 +00:00
36193b0a50 specify tokio dep to avoid RUSTSEC-2021-0124 warning 2021-12-27 18:54:10 +00:00
76684a786e update server dep to rc2 (#2550) 2021-12-27 18:45:31 +00:00
2308f8afa4 use const header values where possible 2021-12-27 16:15:33 +00:00
554ae7a868 rework Handler trait (#2549) 2021-12-27 00:44:30 +00:00
ac0c4eb684 update actix-tls references to stable 3.0.0 2021-12-26 21:24:03 +00:00
2e493cf791 remove crate level clippy allows 2021-12-25 04:53:51 +00:00
5860fe5381 expose Handler trait 2021-12-25 04:43:59 +00:00
adf9935841 improve scope documentation
closes #2389
2021-12-25 03:44:09 +00:00
34e5c7c799 Improve module docs for error handler middleware (#2543) 2021-12-25 02:35:19 +00:00
01cbfc5724 reduce -http re-exports in awc 2021-12-25 02:34:35 +00:00
3756dfc2ce move client to own module 2021-12-25 02:34:31 +00:00
d2590fd46c ClientRequest::send_body takes impl MessageBody (#2546) 2021-12-25 02:33:37 +00:00
1296e07c48 relax unpin bounds on payload types (#2545) 2021-12-24 17:47:47 +00:00
7b1512d863 allow any body type in Scope (#2523) 2021-12-22 15:48:59 +00:00
cd025f5c0b allow any body type in Resource (#2526) 2021-12-22 15:00:32 +00:00
1769812d0b bump outdated deps 2021-12-22 08:43:38 +00:00
324eba7e0b tighten tokio version range to prevent RUSTSEC-2021-0124 2021-12-22 08:41:44 +00:00
b3ac918d70 update itoa to v1 2021-12-22 08:34:48 +00:00
de20d21703 use dash hyphenation in markdown 2021-12-22 08:21:30 +00:00
212c6926f9 Revert "use dash hyphenation in changelogs"
This reverts commit 1ea619f2a1.
2021-12-22 08:18:44 +00:00
1ea619f2a1 use dash hyphenation in changelogs 2021-12-22 08:17:35 +00:00
40a0162074 add tests to scope and resource for returning from fns 2021-12-22 07:58:37 +00:00
f8488aff1e upstream changelog for v3.3.3 2021-12-22 07:20:53 +00:00
64c2e5e1cd remove crate level clippy lint 2021-12-22 07:16:07 +00:00
17f636a183 split request and response modules (#2530) 2021-12-19 17:05:27 +00:00
2e00776d5e Update FUNDING.yml 2021-12-19 04:18:57 +00:00
7d507a41ee Create FUNDING.yml 2021-12-19 03:58:29 +00:00
fb036264cc only build SslConnectorBuilder once (#2503) 2021-12-18 16:44:30 +00:00
d2b9724010 update bump script to detect prerelease versions 2021-12-18 03:27:32 +00:00
5c53db1e4d remove hidden anybody 2021-12-18 01:48:16 +00:00
84ea9e7e88 http: Replace header::map::GetAll with std::slice::Iter (#2527) 2021-12-18 00:05:12 +00:00
0bd5ccc432 update changelog 2021-12-17 21:39:15 +00:00
9cd8526085 prepare actix-router release 0.5.0-beta.3 2021-12-17 21:24:34 +00:00
73bbe56971 prepare actix-test release 0.1.0-beta.9 2021-12-17 21:00:15 +00:00
8340b63b7b prepare awc release 3.0.0-beta.14 2021-12-17 20:59:59 +00:00
6c2c7b68e2 prepare actix-web release 4.0.0-beta.15 2021-12-17 20:59:01 +00:00
7bf47967cc prepare actix-http release 3.0.0-beta.16 2021-12-17 20:57:51 +00:00
ae47d96fc6 use body::None in encoder body 2021-12-17 20:56:54 +00:00
5842a3279d update messagebody documentation 2021-12-17 19:35:08 +00:00
1d6f5ba6d6 improve codegen on BoxBody poll_next 2021-12-17 19:19:21 +00:00
aa31086af5 improve BoxBody Debug impl 2021-12-17 19:16:42 +00:00
57ea322ce5 simplify MessageBody::complete_body interface (#2522) 2021-12-17 19:09:08 +00:00
2cf27863cb remove direct dep on pin-project in -http (#2524) 2021-12-17 14:13:54 +00:00
5359fa56c2 include source for dispatch body errors 2021-12-17 01:29:41 +00:00
a2467718ac passthrough StreamLog error type 2021-12-17 01:27:27 +00:00
3c0d059d92 MessageBody::boxed (#2520)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-12-17 00:43:40 +00:00
44b7302845 minimize futures-util dep in actix-http 2021-12-16 22:26:45 +00:00
a6d5776481 various fixes to MessageBody::complete_body (#2519) 2021-12-16 22:25:10 +00:00
156cc20ac8 refactor testing utils (#2518) 2021-12-15 01:44:51 +00:00
dd4a372613 allow error handler middleware to return different body type (#2515) 2021-12-14 21:17:50 +00:00
05255c7f7c remove either crate conversions (#2516) 2021-12-14 19:57:18 +00:00
fb091b2b88 split up router pattern and resource_path modules 2021-12-14 18:59:59 +00:00
11ee8ec3ab align remaining header map terminology (#2510) 2021-12-13 16:08:08 +00:00
551a0d973c doc tweaks 2021-12-13 02:58:19 +00:00
cea44be670 add test for returning App from function 2021-12-11 16:18:28 +00:00
b41b346c00 inline trivial body methods 2021-12-11 16:05:08 +00:00
5b0a50249b prepare actix-multipart release 0.4.0-beta.10 2021-12-11 00:35:26 +00:00
60b030ff53 prepare actix-web-actors release 4.0.0-beta.8 2021-12-11 00:34:23 +00:00
fc4e9ff96b prepare actix-web-codegen release 0.5.0-beta.6 2021-12-11 00:33:31 +00:00
6481a5fb73 prepare actix-test release 0.1.0-beta.8 2021-12-11 00:32:26 +00:00
0cd7c17682 prepare actix-http-test release 3.0.0-beta.9 2021-12-11 00:32:00 +00:00
ed2f5b40b9 prepare actix-files release 0.6.0-beta.10 2021-12-11 00:31:41 +00:00
cc37be9700 prepare actix-web release 4.0.0-beta.14 2021-12-11 00:30:12 +00:00
e1cdabe5cb prepare awc release 3.0.0-beta.13 2021-12-11 00:28:38 +00:00
d0f4c809ca prepare actix-http release 3.0.0-beta.15 2021-12-11 00:22:09 +00:00
65dd5dfa7b bump script updates referenced crate versions 2021-12-11 00:21:30 +00:00
f62383a975 unpin h2 2021-12-10 22:13:12 +00:00
f9348d7129 add ServiceResponse::into_parts (#2499) 2021-12-09 14:57:27 +00:00
774ac7fec4 provide optimisation path for single-chunk body types (#2497) 2021-12-09 13:52:35 +00:00
69fa17f66f clean future h2 dispatcher 2021-12-09 11:27:29 +00:00
816d68dee8 pin h2 temporarily 2021-12-09 00:46:28 +00:00
7dc034f0fb Remove extensions from head (#2487) 2021-12-08 22:58:50 +00:00
07f2fe385b standardize crate level lints 2021-12-08 06:09:56 +00:00
406f694095 standardize rustfmt max_width 2021-12-08 06:01:11 +00:00
e49e559f47 fix some docs 2021-12-08 05:43:50 +00:00
d35b7644dc add connection level data container (#2491) 2021-12-07 17:23:34 +00:00
069cf2da07 enable scope middleware with generic res body. (#2492)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-12-07 16:26:28 +00:00
6460e67f84 remove generic body type in App. (#2493) 2021-12-07 15:53:04 +00:00
9587261c20 add fakeshadow's actix-web in actix-http example 2021-12-07 15:31:15 +00:00
606a371ec3 improve Data docs 2021-12-06 17:14:56 +00:00
bed72d9bb7 fix examples 2021-12-05 23:23:36 +00:00
c596f573a6 bump actix-server to rc.1 2021-12-05 21:25:15 +00:00
627c0dc22f workaround rustdoc bug for Error (#2489) 2021-12-05 16:19:08 +00:00
2d053b7036 remove actix_http::http module (#2488) 2021-12-05 14:37:20 +00:00
59be0c65c6 disallow query or fragments in url_for constructions (#2430)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-12-05 04:39:18 +00:00
e1a2d9c606 Quality / QualityItem improvements (#2486) 2021-12-05 03:38:08 +00:00
d89c706cd6 re-instate Range typed header (#2485)
Co-authored-by: RideWindX <ridewindx@gmail.com>
2021-12-05 00:02:25 +00:00
4c9ca7196d Add WsResponseBuilder to build web socket session response (#1920)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-12-04 22:32:44 +00:00
fa7f3e6908 undeprecate App::data_factory (#2484) 2021-12-04 19:41:15 +00:00
c7c02ef99d body ergonomics v3 (#2468) 2021-12-04 19:40:47 +00:00
a2d5c5a058 Use client timeout for h2 handshake timeout. (#2483) 2021-12-02 18:16:34 +00:00
deece8d519 re-instate accept-encoding typed header (#2482) 2021-12-02 17:04:40 +00:00
2a72bdae09 improve typed header macro (#2481) 2021-12-02 15:25:39 +00:00
075d871e63 wrap LanguageTags type in new AnyOrSome type to support wildcards (#2480) 2021-12-02 13:59:25 +00:00
c4b20df56a convert all remaining IETF RFC links to new format 2021-12-02 03:45:04 +00:00
0df275c478 update all IETF RFC links to new URL format 2021-12-01 19:42:02 +00:00
697238fadc prepare actix-multipart release 0.4.0-beta.9 2021-12-01 00:26:07 +00:00
e045418038 prepare for actix-tls rc.1 (#2474) 2021-11-30 14:12:04 +00:00
a978b417f3 use actix ready future in remaining return types 2021-11-30 13:11:41 +00:00
fa82b698b7 remove pin-project from actix-web. (#2471) 2021-11-30 11:16:53 +00:00
fc4cdf81eb expose header::map module (#2470) 2021-11-29 02:22:47 +00:00
654dc64a09 don't hang after dropping multipart (#2463) 2021-11-29 02:00:24 +00:00
cf54388534 re-work from request macro. (#2469) 2021-11-29 01:23:27 +00:00
39243095b5 guarantee ordering of header map get_all (#2467) 2021-11-28 19:23:29 +00:00
89c6d62656 clean up multipart and field stream trait impl (#2462) 2021-11-25 00:10:53 +00:00
52bbbd1d73 Minor cleanup of multipart API. (#2461) 2021-11-24 20:53:11 +00:00
3e6e9779dc fix big5 charset parsing 2021-11-24 20:16:15 +00:00
9bdd334bb4 add test for duplicate dynamic segment name 2021-11-23 15:57:18 +00:00
bcbbc115aa fix awc changelog 2021-11-23 15:12:55 +00:00
ab5eb7c1aa prepare actix-multipart release 0.4.0-beta.8 2021-11-22 18:48:14 +00:00
18b8ef0765 prepare actix-test release 0.1.0-beta.7 2021-11-22 18:47:43 +00:00
b806b4773c prepare actix-http-test release 3.0.0-beta.7 2021-11-22 18:46:58 +00:00
0062d99b6f prepare actix-files release 0.6.0-beta.9 2021-11-22 18:46:19 +00:00
99e6a9c26d prepare awc release 3.0.0-beta.11 2021-11-22 18:41:43 +00:00
5f5bd2184e prepare actix-web release 4.0.0-beta.12 2021-11-22 18:20:55 +00:00
88e074879d prepare actix-http release 3.0.0-beta.13 2021-11-22 18:19:09 +00:00
e7987e7429 awc: support http2 over plain tcp with feature flag (#2439)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-11-22 18:16:56 +00:00
a172f5968d prepare for actix-tls v3 beta 9 (#2456) 2021-11-22 15:37:23 +00:00
a2a42ec152 use anybody in doc test 2021-11-22 01:35:33 +00:00
dd347e0bd0 implement io-uring for actix-files (#2408)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-11-22 01:19:09 +00:00
194a691537 files: 304 Not Modified responses omit Content-Length header (#2453) 2021-11-19 14:04:12 +00:00
56ee97f722 add files path traversal tests 2021-11-18 18:14:34 +00:00
66620a1012 simplify handler.rs (#2450) 2021-11-17 20:11:35 +00:00
e33618ed6d ensure content disposition header in multipart (#2451)
Co-authored-by: Craig Pastro <craig.pastro@gmail.com>
2021-11-17 17:44:50 +00:00
1fe309bcc6 increase ci test timeout 2021-11-17 15:32:42 +00:00
168a7284d3 fix actix_http::Error conversion. (#2449) 2021-11-17 13:13:05 +00:00
68a3acb9c2 bump zstd dep 2021-11-16 23:22:29 +00:00
84c6d25fd3 bump env logger dep 2021-11-16 23:07:08 +00:00
0a135c7dc9 bump actix-codec to 0.4.1 2021-11-16 22:41:24 +00:00
668a33c793 remove internal usage of Body 2021-11-16 22:10:30 +00:00
d8cbb879dd make AnyBody generic on Body type (#2448) 2021-11-16 21:41:35 +00:00
13cf5a9e44 remove chunked encoding header for websockets 2021-11-16 16:55:45 +00:00
4df1cd78b7 simplify AnyBody and BodySize (#2446) 2021-11-16 09:21:10 +00:00
e8a0e16863 run tarpaulin on workspace 2021-11-15 18:11:51 +00:00
a2f59c02f7 bump actix-server to beta 9 (#2442) 2021-11-15 04:03:33 +00:00
2754608f3c fix codegen tests 2021-11-08 02:46:43 +00:00
c020cedb63 Log internal server errors (#2387) 2021-11-07 17:02:23 +00:00
5e554dca35 fix awc clippy warning (#2431) 2021-11-04 15:57:55 +00:00
6ec2d7b909 add keep alive to h2 through ping pong (#2433) 2021-11-04 15:15:23 +00:00
ec6d284a8e improve "data not configured" message (#2429) 2021-10-31 13:19:21 +00:00
be9530eb72 avoid building actix-tls with no-default-features (#2426) 2021-10-26 13:16:48 +01:00
855e260fdb Add html_utf8 content type. (#2423) 2021-10-26 09:24:38 +01:00
d13854505f move actix_http::client module to awc (#2425) 2021-10-26 00:37:40 +01:00
d40b6748bc remove dead dep (#2420) 2021-10-22 00:22:58 +01:00
c79b9a0df3 prepare actix-files release 0.6.0-beta.8 2021-10-20 23:32:46 +01:00
4af414064b prepare actix-multipart release 0.4.0-beta.7 2021-10-20 23:31:46 +01:00
9abe166d52 actix-web beta 10 releases (#2417) 2021-10-20 22:32:05 +01:00
c09ec6af4c split off coverage ci job 2021-10-20 02:27:30 +01:00
37f2bf5625 clippy 2021-10-20 02:06:51 +01:00
4f6f0b0137 chore: Bump rustls to 0.20.0 (#2416)
Co-authored-by: Kirill Mironov <vetrokm@gmail.com>
2021-10-20 02:00:11 +01:00
591abc37c3 add test runtime macro (#2409) 2021-10-19 17:30:32 +01:00
ad22cc4e7f bump msrv to 1.52.1 2021-10-19 01:59:28 +01:00
efdf3ab1c3 clippy 2021-10-19 01:32:58 +01:00
6b3ea4fc61 copy original route macro input with compile errors (#2410) 2021-10-14 18:06:31 +01:00
99985fc4ec web: implement into_inner for Data<T: ?Sized> (#2407) 2021-10-12 18:35:33 +01:00
a6707fb7ee Remove checked_expr (#2401) 2021-10-11 18:28:09 +01:00
a3806cde19 fix changelog 2021-09-12 22:41:08 +01:00
efefa0d0ce web: add option to not require content type header for Json (#2362)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-09-11 17:27:50 +01:00
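A sketch of the option this adds, assuming the `JsonConfig::content_type_required` setting described in the PR:

```rust
use actix_web::{web, App};

fn main() {
    // Accept JSON payloads even when the request omits the Content-Type header.
    let json_cfg = web::JsonConfig::default().content_type_required(false);
    let _app = App::new().app_data(json_cfg);
}
```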
450ff5fa1d improve extract docs (#2384) 2021-09-11 16:48:47 +01:00
8ae278cb68 Remove FromRequest::Config (#2233)
Co-authored-by: Jonas Platte <jplatte@users.noreply.github.com>
Co-authored-by: Igor Aleksanov <popzxc@yandex.ru>
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-09-11 01:11:16 +01:00
46699e3429 remove time dep from actix-http (#2383) 2021-09-11 00:01:01 +01:00
ba88d3b4bf prepare actix-web beta.9 releases (#2381)
* prepare actix-router release 0.5.0-beta.2

* prepare actix-web-codegen release 0.5.0-beta.4

* prepare actix-http release 3.0.0-beta.10

* prepare awc release 3.0.0-beta.8

* prepare actix-web release 4.0.0-beta.9

* prepare actix-http-test release 3.0.0-beta.6

* prepare actix-test release 0.1.0-beta.4

* prepare actix-files release 0.6.0-beta.7

* prepare actix-multipart release 0.4.0-beta.6

* prepare actix-web-actors release 4.0.0-beta.7

* fix http test version

* re-add patch

* update router repo url

* fix http test readme version
2021-09-09 01:35:41 +01:00
8dd30611fa accept owned strings in TestRequest::param (#2172)
* accept owned strings in TestRequest::param

* bump actix-router to 0.4.0

* update changelog

Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-09-09 00:42:40 +01:00
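Roughly, this lets test code pass owned `String`s where previously only `&'static str` was accepted; a minimal sketch:

```rust
use actix_web::test::TestRequest;

fn build_requests() {
    // Borrowed and owned path parameters are both accepted after this change.
    let _with_static = TestRequest::default().param("id", "123");
    let _with_owned = TestRequest::default().param("id".to_owned(), "123".to_owned());
}
```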
1383c7d701 speed up ci 2021-09-08 17:42:14 +01:00
d8a0f46f26 refactor web module (#2379) 2021-09-03 18:00:43 +01:00
53ec66caf4 Send headers within the redirect requests. (#2310) 2021-09-01 20:16:41 +01:00
93112644d3 non exhaustive content encoding (#2377) 2021-09-01 09:53:26 +01:00
ddc8c16cb3 Fix quality parse error in Accept-Encoding HTTP header (#2344) 2021-09-01 09:08:29 +01:00
373b3f91df rework ResourceMap internals (#2337) 2021-09-01 04:48:43 +01:00
7d01ece355 ResourceDef: support multiple-patterns as prefix (#2356)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-08-31 14:15:22 +01:00
c50eef6166 "deprecate" calls to NormalizePath::default 2021-08-31 04:19:02 +01:00
dade818eba add middleware composition tests (#2375) 2021-08-31 04:18:54 +01:00
ae35e69382 use rust 1.51 features 2021-08-31 02:52:29 +01:00
5128b1bdfc bump msrv to 1.51 2021-08-30 23:19:03 +01:00
168b2f227d compile time validation of path (#2350)
* compile time validation of path

* added trybuild err message

* Update Cargo.toml

* add changelog entry

* test more cases of path validation

* fmt

Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-08-30 21:50:40 +01:00
4bb32fb19b [fix] Bump actix-http dependency to 3.0.0-beta.9, up from 3.0.0-beta.8 (#2360)
Fixes https://rustsec.org/advisories/RUSTSEC-2021-0081
2021-08-30 20:07:12 +01:00
f9da6e48e0 ResourceDef: define behavior for prefix with trailing slash (#2355)
* ResourceDef: define behavior

* fix tests

* add scope test

* revert firestorm bump

* update changelog

* fmt

Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-08-30 20:05:49 +01:00
ff07816b65 update httparse for uninit header parsing (#2374) 2021-08-29 01:42:22 +01:00
5f412c67db clippy 2021-08-13 18:49:58 +01:00
a0c0bff944 Don't create a slice to potential uninit data on h1 encoder (#2364)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-08-13 18:41:19 +01:00
384164cc14 update graphs 2021-08-12 21:04:26 +01:00
e965d8298f HRS security fixes (#2363) 2021-08-12 20:18:09 +01:00
f6e69919ed update to router 0.5.0 beta (#2339) 2021-08-06 22:42:31 +01:00
293c52c3ef re-export ServiceFactory (#2325) 2021-07-12 16:55:41 +01:00
5a14ffeef2 clippy fixes (#2296) 2021-07-12 16:55:24 +01:00
7ae132cb68 Update MIGRATION.md (#2315)
Minor edit
2021-07-12 02:02:19 +01:00
d8deed0475 fix tests with tokio 1.8.1 (#2317) 2021-07-09 23:57:21 +01:00
2504c2ecb0 Move dev module to separate file, update description (#2293) 2021-06-27 07:44:56 +01:00
604be5495f prepare beta.8 releases (#2292) 2021-06-26 16:33:36 +01:00
262c6bc828 Various refactorings (#2281)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-06-26 15:33:43 +01:00
5eba95b731 simplify ConnectionInfo::new (#2282) 2021-06-26 00:39:06 +01:00
09afd033fc files: file path filtering closure (#2274)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-06-25 14:21:57 +01:00
539697292a fix scope and resource middleware data access (#2288) 2021-06-25 13:19:42 +01:00
5a480d1d78 re-add serde error impls 2021-06-25 12:28:04 +01:00
9a26393375 Remove duplicated step from CI workflow (#2289) 2021-06-25 12:27:22 +01:00
2eacb735a4 Don't leak internal macros (#2290) 2021-06-25 12:25:50 +01:00
767e4efe22 Remove downcast macro from actix-http (#2291) 2021-06-25 10:53:53 +01:00
e559a197cc remove comment 2021-06-24 15:30:11 +01:00
93aa86e30b clippy 2021-06-24 15:11:01 +01:00
2d8d2f5ab0 app data doc improvements 2021-06-24 15:10:51 +01:00
083ee05d50 Route::service (#2262)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-06-23 21:30:06 +01:00
ed0516d724 try to fix doc test failures (#2284) 2021-06-23 20:47:17 +01:00
7535a1ade8 Note that Form cannot require data ordering (#2283) 2021-06-23 16:54:25 +01:00
8846808804 ServiceRequest::parts_mut (#2177) 2021-06-23 00:42:00 +01:00
3b6333e65f Propagate error cause to middlewares (#2280) 2021-06-22 22:22:33 +01:00
b1148fd735 Implement FromRequest for request parts (#2263)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-06-22 17:32:03 +01:00
12f7720309 deprecate App::data and App::data_factory (#2271) 2021-06-22 15:50:58 +01:00
2d8530feb3 chore: bump actix to 0.12.0 in actix-web-actors (#2277)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-06-22 14:00:28 +01:00
7faeffc5ab prepare actix-test release 0.1.0-beta.3 2021-06-20 19:47:42 +01:00
f81d4bdae7 remove unused private hidden methods 2021-06-19 23:40:30 +01:00
6893773280 files: allow show_files_listing() with index_file() (#2228) 2021-06-19 21:00:31 +01:00
73a655544e tweak compress feature docs 2021-06-19 20:23:06 +01:00
baa5a663c4 Select compression algorithm using feature flags (#2250)
Add compress-* feature flags in actix-http / actix-web / awc.
This allows enabling / disabling unwanted compression algorithms.
2021-06-19 20:21:13 +01:00
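The middleware side is unchanged by this; the new `compress-*` Cargo features only decide which algorithms are compiled in. A minimal sketch of enabling compression in an app (feature selection itself happens in `Cargo.toml`):

```rust
use actix_web::{middleware::Compress, App};

fn main() {
    // Compress negotiates among whichever algorithms were enabled at build time
    // via the compress-* feature flags mentioned above.
    let _app = App::new().wrap(Compress::default());
}
```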
c260fb1c48 beta.7 releases (#2266) 2021-06-19 11:51:20 +01:00
532f7b9923 refined error model (#2253) 2021-06-17 17:57:58 +01:00
bb0331ae28 fix cargo cache on msrv 2021-06-17 16:49:31 +01:00
8d124713fc files: inline disposition for common web app file types (#2257) 2021-06-16 20:33:22 +01:00
fb2b362b60 Adjust JSON limit to 2MB and report on sizes (#2162)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-06-16 15:52:49 +01:00
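For apps that want something other than the new default, the limit can still be set explicitly; a minimal sketch:

```rust
use actix_web::{web, App};

fn main() {
    // Explicitly configure the JSON payload limit instead of relying on the default.
    let json_cfg = web::JsonConfig::default().limit(2 * 1024 * 1024); // 2 MiB
    let _app = App::new().app_data(json_cfg);
}
```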
75f65fea4f Extends Rustls ALPN protocols instead of replacing them when creating Rustls based services (#2226) 2021-06-10 16:25:21 +01:00
812269d656 clarify docs for BodyEncoding::encoding() (#2258) 2021-06-10 15:38:35 +01:00
e46cda5228 Deduplicate rt::main macro logic (#2255) 2021-06-08 22:44:56 +01:00
2e1d761854 add Seal argument to sealed AsHeaderName methods (#2252) 2021-06-08 12:57:19 +01:00
b1e841f168 Don't normalize URIs with no valid path (#2246) 2021-06-05 17:19:45 +01:00
0bb035cfa7 Add information about Actix discord server (#2247) 2021-06-04 02:54:40 +01:00
3479293416 Add zstd ContentEncoding support (#2244)
Co-authored-by: Igor Aleksanov <popzxc@yandex.ru>
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-06-03 21:32:52 +01:00
136dac1352 Additional test coverage and tidyup (middleware::normalize) (#2243) 2021-06-03 03:28:09 +01:00
e5b713b04a files: Fix redirect_to_slash_directory() when used with show_files_listing() (#2225) 2021-05-26 10:42:29 +01:00
3847429d00 Response::from_error take impl Into<Error> (#2214) 2021-05-26 13:41:48 +09:00
bb7d33c9d4 refactor h2 dispatcher to async/await; reduce duplicate code (#2211) 2021-05-25 03:21:20 +01:00
4598a7c0cc Only run UI tests on MSRV (#2232) 2021-05-25 00:09:38 +09:00
b1de196509 Fix clippy warnings (#2217) 2021-05-15 01:13:33 +01:00
2a8c650f2c move internalerror to actix web (#2215) 2021-05-14 16:40:00 +01:00
f277b128b6 cleanup ws test (#2213) 2021-05-13 12:24:32 +01:00
4903950b22 update changelog 2021-05-09 20:15:49 +01:00
f55e8d7a11 remove error field from response 2021-05-09 20:15:48 +01:00
900c9e270e remove responsebody indirection from response (#2201) 2021-05-09 20:12:48 +01:00
a9dc1586a0 remove rogue eprintln 2021-05-07 10:14:25 +01:00
947caa3599 examples use info log level by default 2021-05-06 20:24:18 +01:00
7d1d5c8acd Expose ServerBuilder::worker_max_blocking_threads (#2200) 2021-05-06 18:35:04 +01:00
ddaf8c3e43 add associated error type to MessageBody (#2183) 2021-05-05 18:36:02 +01:00
dd1a3e7675 Fix loophole in soundness of __private_get_type_id__ (#2199) 2021-05-05 11:16:12 +01:00
c17662fe39 Reduce the level of the emitted log line from error to debug. (#2196)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-05-03 00:58:14 +01:00
3a0fb3f89e Static either extract future (#2184) 2021-05-01 03:02:56 +01:00
1fcf92e11f Update dependency "language-tags" (#2188) 2021-04-28 01:23:12 +01:00
6a29a50f25 files doc wording 2021-04-22 18:37:45 +01:00
75867bd073 clean up files service docs and rename method
follow on from #2046
2021-04-22 18:31:21 +01:00
f44a0bc159 add support of filtering guards in Files of actix-files (#2046)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-22 18:13:13 +01:00
07036b5640 static form extract future (#2181) 2021-04-22 13:54:29 +01:00
a7cd4e85cf use stable codec 0.4.0 2021-04-21 11:14:22 +01:00
6a9c4f1026 update awc docs link, formatting (#2180) 2021-04-20 19:57:27 +01:00
427fe6bd82 improve responseerror trait docs 2021-04-19 23:16:04 +01:00
2aa674c1fd Fix perf drop in HttpResponseBuilder (#2174) 2021-04-19 23:15:57 +01:00
52bb2b5daf hide downcast macros 2021-04-19 03:42:53 +01:00
db97974dc1 make some http re-exports more accessible (#2171) 2021-04-19 03:29:38 +01:00
b9dbc58e20 content disposition methods take impl AsRef<str> 2021-04-19 02:31:11 +01:00
35f8188410 restore cookie methods on ServiceRequest 2021-04-19 02:24:20 +01:00
8ffb1f2011 update files changelog 2021-04-19 02:11:07 +01:00
26e9c80626 Named file service (#2135) 2021-04-18 23:34:51 +01:00
f462aaa7b6 prepare actix-test release 0.1.0-beta.2 2021-04-17 15:53:54 +01:00
5a162932f3 prepare awc release 3.0.0-beta.5 2021-04-17 15:30:31 +01:00
b2d6b6a70c prepare web release 4.0.0-beta.6 2021-04-17 15:28:13 +01:00
f743e885a3 prepare http release 3.0.0-beta.6 2021-04-17 15:24:18 +01:00
5747f84736 bump utils to stable v3 2021-04-17 02:07:33 +01:00
879a4cbcd8 re-export ready boilerplate macros in dev 2021-04-16 23:21:02 +01:00
2449f2555c missed one pipeline_factory 2021-04-16 20:48:37 +01:00
d8f56eee3e bump service to stable v2 2021-04-16 20:28:21 +01:00
8d88a0a9af rename header generator macros 2021-04-16 19:15:10 +01:00
845c02cb86 Add responder impl for Cow<str> (#2164) 2021-04-16 00:54:51 +01:00
64bed506c2 chore: update benchmarks to round 20 (#2163) 2021-04-15 19:11:30 +01:00
ff65f1d006 non exhaustive http errors (#2161) 2021-04-14 06:07:59 +01:00
a9f26286f9 reduce branches in h1 dispatcher poll_keepalive (#2089) 2021-04-14 05:20:45 +01:00
037ac80a32 document messagebody trait items 2021-04-14 03:23:15 +01:00
1bfdfd1f41 implement parts as assoc method 2021-04-14 02:57:28 +01:00
5202bf03c1 add some doc examples to response builder 2021-04-14 02:45:58 +01:00
387c229f28 move response builder code to own file 2021-04-14 02:12:47 +01:00
23e0c9b6e0 remove http-codes builders from actix-http (#2159) 2021-04-14 02:00:14 +01:00
02ced426fd add body to_bytes helper (#2158) 2021-04-13 13:34:22 +01:00
4442535a45 clippy 2021-04-13 12:44:38 +01:00
edd9f14752 remove unpin from body types (#2152) 2021-04-13 11:16:12 +01:00
ce50cc9523 files: Don't use canonical path when serving file (#2156) 2021-04-13 05:28:30 +01:00
981c54432c remove json and url encoded form support from -http (#2148) 2021-04-12 10:30:28 +01:00
44c55dd036 remove cookie support from -http (#2065) 2021-04-09 18:07:10 +01:00
c72d77065d derive debug where possible (#2142) 2021-04-09 03:22:51 +01:00
44a2d2214c update year in MIT license (#2143) 2021-04-09 01:28:35 +01:00
3f5a73793a make module/crate re-exports doc inline (#2141) 2021-04-08 20:51:16 +01:00
e0b2246c68 prepare test release 0.1.0-beta.1 2021-04-02 10:03:01 +01:00
e0ae8e59bf prepare actors release 4.0.0-beta.4 2021-04-02 09:55:35 +01:00
a9641e475a prepare http-test release 3.0.0-beta.4 2021-04-02 09:54:35 +01:00
05c7505563 prepare multipart release 0.4.0-beta.4 2021-04-02 09:45:31 +01:00
8561263545 prepare files release 0.6.0-beta.4 2021-04-02 09:43:51 +01:00
a32151525c prepare awc release 3.0.0-beta.4 2021-04-02 09:40:36 +01:00
546e7c5da4 prepare web release 4.0.0-beta.5 2021-04-02 09:37:51 +01:00
6fb06a720a prepare http release 3.0.0-beta.5 2021-04-02 09:27:11 +01:00
c54a0713de migrate integration testing to new crate (#2112) 2021-04-02 08:26:59 +01:00
50dc13f280 move typed headers and implement FromRequest (#2094)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-01 16:42:18 +01:00
c8ed8dd1a4 migrate to -utils beta 4 (#2127) 2021-04-01 15:26:13 +01:00
a807d33600 added TestServer::client_headers (#2097)
Co-authored-by: fakeshadow <24548779@qq.com>
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-01 06:40:10 +01:00
1f1be6fd3d add Client::headers (#2114) 2021-03-31 11:43:56 +01:00
c49fe79207 Simplify lifetime annotation in HttpServiceBuilder. Simplify PlStream (#2129) 2021-03-30 15:46:09 +01:00
f66774e30b remove From<OffsetDateTime> impl from HttpDate
fully removes time crate from public api of -http
2021-03-30 03:32:22 +01:00
1281a748d0 merge H1ServiceHandler requests into HttpServiceHandler (#2126) 2021-03-30 03:06:16 +01:00
222acfd070 Fix build for next actix-tls-beta release (#2122) 2021-03-29 13:45:48 +01:00
980ecc5f07 fix openssl windows ci 2021-03-29 13:01:37 +01:00
e8ce73b496 update dep docs 2021-03-29 11:52:59 +01:00
f954a30c34 Fix typo in CHANGES.md (#2124) 2021-03-29 10:18:05 +01:00
60f9cfbb2a Refactor actix_http::h2::service module. Reduce loc. (#2118) 2021-03-26 18:24:51 +00:00
6822bf2f58 Refactor actix_http::h1::service (#2117) 2021-03-26 16:15:04 +00:00
2f7f1fa97a fix broken pipe for h2 when client is instantly dropped (#2113) 2021-03-26 00:05:31 +00:00
8c2ce2dedb fix awc compress feature (#2116) 2021-03-25 22:47:37 +00:00
3188ef5731 don't use rust annotation on code doc blocks 2021-03-25 08:45:52 +00:00
9704beddf8 Relax MessageBody limit to 2048kb (#2110)
* relax MessageBody limit to 2048kb

* fix clippy

* Update awc/src/response.rs

Co-authored-by: Rob Ede <robjtede@icloud.com>

* fix default body limit

Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-03-24 04:44:03 -07:00
1be54efbeb Simplify service factory macro (#2108) 2021-03-23 13:42:46 +00:00
746d983849 handle header error with CustomResponder (#2093) 2021-03-20 05:18:06 +00:00
8d9de76826 Simplify handler factory macro (#2086) 2021-03-19 16:30:53 +00:00
9488757c29 Update to socket2 v0.4 (#2092) 2021-03-19 12:17:06 +00:00
351286486c fix clippy warning on nightly (#2088)
* fix clippy warning on nightly
2021-03-19 19:25:35 +08:00
78fcd0237a Format extract macro (#2087) 2021-03-19 04:08:23 +00:00
81942d31d6 fix new dyn trait lint 2021-03-19 02:03:09 +00:00
b75b5114c3 refactor actix_http connection types and connector services (#2081) 2021-03-18 17:53:22 +00:00
abcb444dd9 fix routes in Path documentation (#2084) 2021-03-18 13:21:44 +00:00
983b6904a7 unvendor openssl 2021-03-17 00:38:54 +00:00
3dc2d145ef import some traits as _ 2021-03-17 00:38:54 +00:00
c8f6d37290 rename client io trait. reduce duplicate code (#2079) 2021-03-16 16:31:14 +00:00
69dd1a9bd6 Remove ConnectionLifetime trait. Simplify Acquired handling (#2072) 2021-03-16 02:56:23 +00:00
d93314a683 fix awc readme example (#2076) 2021-03-15 10:59:42 +00:00
a55e87faaa refactor actix_http::helpers to generic over bufmut trait (#2069) 2021-03-15 02:33:51 +00:00
515d0e3fb4 change behavior of default upgrade handler (#2071) 2021-03-13 22:20:18 +00:00
22dcc31193 Fix logger middleware properly escape %% (#2067) 2021-03-11 14:12:42 +00:00
909ef0344b document client mod removal
closes #2064
2021-03-11 00:43:03 +00:00
a2b0e86632 simplify connector generic type (#2063) 2021-03-10 23:57:32 +00:00
d0c1f1a84c remove actix_http::client::pool::Protocol (#2061) 2021-03-10 01:31:50 +00:00
b62da7e86b prepare actix-web-actors release 4.0.0-beta.3 2021-03-09 23:44:26 +00:00
5e9a3eb6ae prepare actix-multipart release 0.4.0-beta.3 2021-03-09 23:40:50 +00:00
3451d6874f prepare actix-files release 0.6.0-beta.3 2021-03-09 23:39:40 +00:00
18c3783a1c prepare actix-http-test release 3.0.0-beta.3 2021-03-09 23:35:42 +00:00
4b46351d36 prepare actix-web release 4.0.0-beta.4 2021-03-09 23:31:44 +00:00
b7c406637d prepare actix-web-codegen release 0.5.0-beta.2 2021-03-09 23:27:38 +00:00
c4e5651215 update docs 2021-03-08 23:49:12 +00:00
23b0e64199 prepare awc release 3.0.0-beta.3 2021-03-08 23:15:53 +00:00
fc31b091e4 prepare http release 3.0.0-beta.4 2021-03-08 23:07:40 +00:00
effacf8fc8 fix ssl test 2021-03-08 20:51:50 +00:00
95130fcfd0 address clippy warnings 2021-03-08 20:32:19 +00:00
5e81105317 remove ka timer from h2 dispatcher (#2057) 2021-03-08 20:00:20 +00:00
5b4105e1e6 Refactor/client builder (#2053) 2021-03-07 23:57:32 +00:00
2d3a0d6038 json method receives plain serialize (#2052) 2021-03-07 22:11:39 +00:00
fe0b3f459f remove localwaker from h1::payload (#2051) 2021-03-07 21:23:42 +00:00
ca69b6577e use itoa for more content-length insertions (#2050) 2021-03-07 19:29:02 +00:00
880b863f95 fix h1 client for handling expect header request (#2049) 2021-03-07 18:33:16 +00:00
78384c3ff5 make actix_http::ws::Codec::new const (#2043) 2021-03-04 19:19:01 +00:00
c1c4400c4a fix h2 tests (#2034) 2021-03-04 13:27:54 +00:00
fc6f974617 Add "name" attribute to route macro (#1934) 2021-03-04 12:38:47 +00:00
14b249b804 update to actix-0.11.0-beta.3 for actix-web-actors (#2042) 2021-03-04 11:39:29 +00:00
0195824794 Update codecov.yml 2021-03-02 13:18:16 +00:00
fb019f15b4 test(files): Fix test and remove outdated case (#2037)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-02-28 23:01:59 +00:00
abc7fd374b update example links (#2036)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-02-28 21:41:07 +00:00
cd652dca75 refactor websocket key hashing (#2035) 2021-02-28 19:55:34 +00:00
c836de44af add client middleware (#2013) 2021-02-28 18:17:08 +00:00
badae2f8fd add local_address bind for client builder (#2024) 2021-02-27 22:31:14 +00:00
1f34718ecd Use once_cell instead of lazy_static (#2029) 2021-02-27 21:55:50 +00:00
ebda60fd6b refactor boxed route (#2033) 2021-02-27 21:00:36 +00:00
d242f57758 fix tests for codecov 2021-02-27 20:58:44 +00:00
b95e1dda34 pin h2 to 0.3.0 2021-02-27 19:57:09 +00:00
8f2a97c6e3 Update README example links (#2027)
The examples repo went through a folder restructuring. More info: https://github.com/actix/examples/pull/411
2021-02-26 01:21:56 +00:00
ebaf25d55a Fix typos in CHANGES.md (#2025)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-02-24 16:19:40 +00:00
42711c23d7 Port over doc comments in route macros. (#2022)
Co-authored-by: Jonas Platte <jplatte@users.noreply.github.com>
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-02-24 12:26:56 +00:00
f6393728c7 remove usage of actix_utils::mpsc (#2023) 2021-02-24 09:08:56 +00:00
d92ab7e8e0 add msrv to clippy config (#1862) 2021-02-22 15:39:31 +00:00
5845b3965c actix-http-test: minimize features of dependencies (#2019) 2021-02-22 12:00:08 +00:00
aacec30ad1 reduce duplicate code (#2020) 2021-02-22 11:15:12 +00:00
2dbdf61c37 Inner field of web::Query is public again (#2016) (#2017)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-02-20 17:59:09 +00:00
83365058ce Fix HTTP client link (#2011) 2021-02-18 21:56:24 +00:00
3b93c62e23 Fix Json extractor to be 32kB by default (#2010) 2021-02-18 15:20:20 +00:00
946cccaa1a refactor awc::ClientBuilder (#2008) 2021-02-18 12:30:09 +00:00
1838d9cd0f remove unused method. reduce leaf future type (#2009) 2021-02-18 11:24:10 +00:00
f62a982a51 simplify the match on h1 message type (#2006) 2021-02-18 10:38:27 +00:00
dfd9dc40ea remove awc::connect::connect trait. (#2004) 2021-02-17 17:10:46 +00:00
5efea652e3 add ClientResponse::timeout (#1931) 2021-02-17 11:55:11 +00:00
dfa795ff9d return poll in poll_flush (#2005) 2021-02-17 11:18:31 +00:00
2cc6b47fcf Use http-range library for HttpRange (#2003) 2021-02-16 18:48:16 +00:00
117025a96b simplify client::connection::Connection trait (#1998) 2021-02-16 14:10:22 +00:00
3e0a9b99ff update rust-cache action 2021-02-16 09:28:14 +00:00
17b3e7e225 pool doc nits (#1999) 2021-02-16 09:08:30 +00:00
c065729468 rework client connection pool (#1994) 2021-02-16 08:27:14 +00:00
55db3ec65c split up http body module 2021-02-15 12:20:43 +00:00
0404b78b54 improve body size docs 2021-02-15 11:24:46 +00:00
68d1bd88b1 remove unused flag upgrade (#1992) 2021-02-14 18:13:05 +00:00
308b70b039 fix potential over read (#1991) 2021-02-14 17:36:18 +00:00
7fa6333a0c use rcgen for tls key generation (#1989) 2021-02-13 17:16:36 +00:00
3279070f9f optional cookies features (#1981) 2021-02-13 15:08:43 +00:00
b37669cb3b fix notify on drop (#1987) 2021-02-13 04:23:37 +00:00
1e538bf73e rework ci (#1982) 2021-02-12 21:53:21 +00:00
366c032c36 refactor DateService (#1983) 2021-02-12 21:52:58 +00:00
95113ad12f do not self wake up when have a payload (#1984) 2021-02-12 20:33:13 +00:00
ce9b2770e2 remove unused Dispatcher::new_timeout (#1985) 2021-02-12 10:37:28 +00:00
4fc7d76759 s/websocket/WebSocket in docs 2021-02-12 00:27:20 +00:00
81bef93e5e add time parser year shift tests 2021-02-12 00:15:25 +00:00
31d9ed81c5 change rustfmt line width to 96 2021-02-11 23:03:17 +00:00
c1af5089b9 add 431 and 451 status codes 2021-02-11 22:58:40 +00:00
77efc09362 hide httpmessage mod 2021-02-11 22:58:40 +00:00
871ca5e4ae stop claiming actor support 2021-02-11 22:58:40 +00:00
ceace26ed4 remove unused flag POLLED (#1980) 2021-02-11 14:19:14 -08:00
75a9a72e78 clean up poll_response. add comments (#1978) 2021-02-11 14:54:42 +00:00
d9d0d1d1a2 reduce unsafe (#1972) 2021-02-10 23:11:12 +00:00
ea5ce3befb prepare actix-http 3.0.0-beta.3 release 2021-02-10 18:36:14 +00:00
e18464b274 bump actix web versions in deps 2021-02-10 12:57:13 +00:00
bd26083f33 prepare codegen 0.5.0-beta.1 release 2021-02-10 12:45:46 +00:00
991363a104 consistent case s/web/Web 2021-02-10 12:12:03 +00:00
a290e58982 prepare beta 2 release set (#1975) 2021-02-10 12:10:03 +00:00
dcad9724bc ensure poll_flush on h1 connection disconnect (#1974) 2021-02-10 10:11:53 +00:00
949d14ae2b clean up header map (#1964) 2021-02-09 22:59:17 +00:00
a6ed4aee84 add poll_flush after a non blocked write to h1 dispatcher (#1971) 2021-02-09 22:32:46 +00:00
519d7f2b8a add trust-dns optional feature for actix-http and awc (#1969) 2021-02-09 10:41:20 +00:00
dddb623a11 add services register for tuple and vec of services (#1933) 2021-02-07 23:47:51 +00:00
266cf0622c reduce branches; remove dead code for h1 dispatcher (#1962) 2021-02-07 22:48:27 +00:00
9604e249c9 use stable clippy (#1963) 2021-02-07 20:33:53 +00:00
dbc47c9122 optimize actix-http messages (#1914) 2021-02-07 20:19:10 +00:00
4c243cbf89 simplify methods of awc::connect::Connect trait (#1941) 2021-02-07 18:56:39 +00:00
deafb7c8b8 Improve impl ResponseError documentation (#1939)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-02-07 04:54:41 +00:00
50309aa295 Use askama-escape for html escaping (#1953) 2021-02-07 04:50:23 +00:00
9eaea6a2fd tweak feature flags 2021-02-07 03:54:58 +00:00
830fb2cdb2 properly drop h2 connection (#1926) 2021-02-07 03:51:36 +00:00
7cfed73be8 fix memory usage for h1 and read bug on buffer size. (#1929) 2021-02-07 03:20:35 +00:00
41bc04b1c4 Use immutable reference of service state. Update awc dns resolver. (#1905) 2021-02-07 01:00:40 +00:00
20cf0094e5 fix master branch build. change web::block output type. (#1957) 2021-02-06 16:23:59 +00:00
83fb4978ad fix awc test_client test (#1960) 2021-02-06 16:05:33 +00:00
51e54dac8b fix limit not working on HttpMessageBody::limit (#1938) 2021-01-27 10:49:57 +00:00
c201c15f8c Improve documentation for PayloadConfig (#1923) 2021-01-24 00:32:10 +00:00
0c8196f8b0 Remove HttpResponseBuilder::json2() (#1903)
It's not necessary to keep both json() and json2() around since the
former reduces the ownership of its parameter to a borrow only to pass
the reference to the latter. Users can instead borrow themselves when
passing an owned value: there doesn't need to be two separate functions.

This change also makes HttpResponseBuilder::json() take T: Deref so it
can accept both references and web extractors like web::Json.
2021-01-18 12:14:29 +00:00
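A rough sketch of what the unified builder method accepts after this change (the `Info` type is illustrative and assumes serde's derive feature):

```rust
use actix_web::HttpResponse;
use serde::Serialize;

#[derive(Serialize)]
struct Info {
    name: &'static str,
}

fn build_responses() {
    let info = Info { name: "actix" };

    // One json() now covers both cases that used to be split between json() and json2():
    let _from_value = HttpResponse::Ok().json(Info { name: "actix" });
    let _from_reference = HttpResponse::Ok().json(&info);
}
```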
ee10148444 revive commented out tests (#1912) 2021-01-17 05:19:32 +00:00
1c95fc2654 Refactor poll_keepalive for readability (#1901) 2021-01-16 00:15:06 +00:00
da69bb4d12 implement App::data as App::app_data(Data::new(T))) (#1906) 2021-01-15 23:37:33 +00:00
0a506bf2e9 cleanup top level doc comments 2021-01-15 05:38:50 +00:00
b2a9ba2ee4 Update PULL_REQUEST_TEMPLATE.md 2021-01-15 04:54:23 +00:00
f976150b67 return option item from Extensions::insert (#1904) 2021-01-15 04:22:42 +00:00
b1dd8d28bc response header rework (#1869) 2021-01-15 02:11:10 +00:00
4edeb5ce47 optimize ErrorHandler middleware (#1902) 2021-01-14 01:43:44 +00:00
d34a8689e5 Refactor h1 encoder (#1900) 2021-01-12 14:38:53 +00:00
a919d2de56 actix-files: Fix If-(Un)Modified to not consider sub-seconds (#1887) 2021-01-11 18:18:23 +00:00
46a8f28b74 fix actix-files doc about thread pool (#1898) 2021-01-11 17:27:33 +00:00
57398c6df1 Refactor/service request (#1893) 2021-01-11 01:29:16 +00:00
7affc6878e simplify h1 dispatcher (#1899)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-01-11 00:13:56 +00:00
46b2f7eaaf use a non leak pool for HttpRequestInner (#1889)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-01-10 22:59:44 +00:00
9e401b6ef7 refactor Scope (#1895) 2021-01-09 18:06:49 +00:00
fe392abeb4 remove actix-threadpool; use actix_rt::task::spawn_blocking (#1878) 2021-01-09 16:04:19 +00:00
f6cc829758 remove leaked box in REQUEST_POOL and RESPONSE_POOL (#1896) 2021-01-09 15:40:20 +00:00
6575ee93f2 big clean-up and docs improvement of types mod (#1894) 2021-01-09 13:17:19 +00:00
530d03791d refactor Resource (#1883) 2021-01-09 03:36:58 +00:00
d40ae8c8ca use sync method on Responder trait (#1891) 2021-01-08 22:17:19 +00:00
2204614134 don't run awc doctests that rely on external public endpoints (#1888) 2021-01-08 12:00:58 +00:00
188ee44f81 remove copyless dependency (#1884) 2021-01-07 21:55:00 +00:00
a4c9aaf337 fix extra branch in h1 dispatcher timer (#1882) 2021-01-07 20:42:09 +00:00
c09186a2c0 prepare v4 beta releases (#1881) 2021-01-07 20:02:08 +00:00
d3c476b8c2 use env_logger builders in examples 2021-01-07 02:41:05 +00:00
dc23559f23 address clippy lints 2021-01-07 02:04:26 +00:00
6d710629af fix bug where upgrade future is not reset properly (#1880) 2021-01-07 00:57:34 +00:00
85753130d9 fmt 2021-01-07 00:35:19 +00:00
00ba8d5549 add http3 variant to protocol enum 2021-01-06 18:58:24 +00:00
51e9e1500b add docs to recent additions 2021-01-06 18:52:06 +00:00
a03dbe2dcf replace cloneable service with httpflow abstraction (#1876) 2021-01-06 18:43:52 +00:00
57a3722146 More refactor of app_service (#1879) 2021-01-06 18:11:20 +00:00
57da1d3c0f refactor app_service (#1877) 2021-01-06 11:35:30 +00:00
68117543ea major cleanup of middleware module (#1875)
* major cleanup of middleware module

* update changelog
2021-01-05 09:51:58 +00:00
4f5971d79e add Compat middleware (#1865) 2021-01-05 00:22:57 +00:00
93161df141 clean up body type (#1872) 2021-01-04 23:47:38 +00:00
e567873326 optimize message pool release (#1871) 2021-01-04 13:03:46 +00:00
7d632d0b7b use ByteString as container for websocket text message (#1864) 2021-01-04 11:27:32 +00:00
36aee18c64 fmt 2021-01-04 04:33:15 +00:00
007a145988 use ahash for internal hashmaps 2021-01-04 04:29:07 +00:00
2d4a174420 fmt 2021-01-04 01:01:35 +00:00
21f6c9d7a5 improve code readability 2021-01-04 00:49:02 +00:00
e1683313ec optimize ServiceRequest methods (#1870)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-01-04 00:32:41 +00:00
32de9f8840 Tokio 1.0 (#1813)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-01-03 23:47:04 +00:00
1f202d40e4 optimize write_camel_case in h1 encoder (#1868) 2021-01-03 16:53:01 +00:00
ad608aa64e optimize Resource and Scope service call (#1867) 2021-01-02 19:40:31 +00:00
a1b00b2cd0 change unreleased year 2021-01-02 00:12:18 +00:00
3beb4cf2da replace tinyvec with smallvec (#1866) 2021-01-01 23:18:25 +00:00
522c9a5ea6 update CoC text 2020-12-31 03:24:18 +00:00
102bb8f9ab update dot dep graphs 2020-12-29 00:22:28 +00:00
20b46cdaf9 format factory_tuple macro invocations (#1859) 2020-12-28 21:04:02 +00:00
2a2a20c3e7 bump msrv to 1.46 (#1858) 2020-12-28 00:44:15 +00:00
093d3a6c59 remove deprecated on_connect methods (#1857) 2020-12-27 23:23:30 +00:00
8c9ea43e23 address clippy warnings 2020-12-27 20:54:04 +00:00
f9fcf56d5c reduce branch in actix_http::h1::codec (#1854) 2020-12-27 20:37:53 +00:00
cbda928a33 Rename factory to handler (#1852) 2020-12-26 21:46:19 +00:00
1032f04ded remove unused actix_http::h1::OneRequest (#1853) 2020-12-26 12:46:36 +00:00
b373e1370d prepare files 0.5.0 release 2020-12-26 04:05:45 +00:00
404b5a7709 Add optional support for hidden files/directories (#1811) 2020-12-26 03:36:15 +00:00
ecf08d5156 Remove boxed future from h1 Dispatcher (#1836) 2020-12-24 19:15:17 +00:00
87655b3028 reduce one clone on Arc. (#1850) 2020-12-23 23:58:25 +00:00
3a192400a6 Simplify handler (#1843) 2020-12-23 15:47:07 +00:00
2a7f2c1d59 dispatcher internals testing (#1840) 2020-12-23 01:28:17 +00:00
05f104c240 improve NormalizePath docs (#1839) 2020-12-23 00:19:20 +00:00
4dccd092f3 Bump rand from 0.7.x to 0.8.x (#1845) 2020-12-22 23:45:31 +00:00
95ccf1c9bc replace actix_utils::oneshot with futures_channel::oneshot (#1844) 2020-12-21 16:42:20 +00:00
6cbf27508a simplify ExtractService's return type (#1842) 2020-12-20 02:20:29 +00:00
79de04d862 optimise Extract service (#1841) 2020-12-19 16:33:34 +00:00
a4dbaa8ed1 remove boxed future in DefaultHeaders middleware (#1838) 2020-12-18 23:08:59 +00:00
c7b4c6edfa Disable PR comment from codecov 2020-12-17 21:38:52 +09:00
2a5215c1d6 Remove boxed future from HttpMessage (#1834) 2020-12-17 11:40:49 +00:00
97f615c245 remove boxed futures on Json extract type (#1832) 2020-12-16 23:34:33 +00:00
1a361273e7 optimize bytes and string payload extractors (#1831) 2020-12-16 22:40:26 +00:00
d7ce648445 remove boxed future for Option<T> and Result<T, E> extract type (#1829)
* remove boxed future for Option<T> and Result<T, E> extract type

* use ready macro

* fix fmt
2020-12-16 18:34:10 +00:00
fabc68659b Intradoc links conversion (#1827)
* switching to nightly for intra-doc links

* actix-files intra-doc conversion

* more specific Result

* intradoc conversion complete

* rm blank comments and readme doc link fixes

* macros and broken links
2020-12-13 13:28:39 +00:00
542db82282 Simplify wake up of task (#1826) 2020-12-12 20:07:06 +00:00
ae63eb8bb2 fix clippy warnings (#1806)
* fix clippy warnings

* prevent CI fail status caused by codecov
2020-12-09 11:22:19 +00:00
7a3776b770 remove two unused generics on BoxedRouteFuture types. (#1820) 2020-12-09 10:47:59 +00:00
ff79c33fd4 remove a box (#1814) 2020-12-06 11:42:15 +00:00
b75a9b7a20 add error to message in test helper func (#1812) 2020-12-05 04:57:56 +09:00
d0c6ca7671 test-server => actix-http-test (#1807) 2020-12-02 17:23:30 +00:00
24d525d978 prepare web 3.3.2 release 2020-12-01 22:22:46 +00:00
1f70ef155d Fix match_pattern() returning None for scope with resource of empty path (#1798)
* fix match_pattern function not returning pattern where scope has resource of path ""

* remove print in test

* make comparison on existing else if block

* add fix to changelog
2020-12-01 13:39:41 +00:00
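The case the fix targets can be sketched like this (handler name is illustrative); with the fix, `match_pattern()` should return the scope's pattern rather than `None`:

```rust
use actix_web::{web, App, HttpRequest, HttpResponse, Responder};

async fn index(req: HttpRequest) -> impl Responder {
    // Before the fix this was None when the matched resource had an empty path "".
    let pattern = req.match_pattern();
    HttpResponse::Ok().body(format!("matched pattern: {:?}", pattern))
}

fn app() {
    let _app = App::new()
        .service(web::scope("/user").service(web::resource("").route(web::get().to(index))));
}
```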
7981e0068a Remove a panic in normalize middleware (#1762)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-12-01 10:22:15 +09:00
32d59ca904 Upgrade socket2 dependency (#1803)
Upgrades to a version not making invalid assumptions about
the memory layout of std::net::SocketAddr
2020-12-01 04:18:02 +09:00
ea8bf36104 update web and awc changelogs 2020-11-29 16:35:35 +00:00
0b5b463cfa prepare web and awc releases
closes #1799
2020-11-29 16:33:45 +00:00
fe6ad816cc update dotgraphs 2020-11-25 00:54:00 +00:00
e72b787ba7 prepare actix-web and actix-http-test releases 2020-11-25 00:53:48 +00:00
efc317d3b0 prepare actix-http and awc releases 2020-11-25 00:07:56 +00:00
31057becca prepare actix-files release 0.4.1 2020-11-24 20:33:23 +00:00
f1a9b45437 improve docs for Files::new 2020-11-24 20:23:09 +00:00
5af46775b8 refactor quality and use TryFrom instead of custom trait (#1797) 2020-11-24 11:37:05 +00:00
70f4747a23 add method for getting accept type preference (#1793) 2020-11-24 10:08:57 +00:00
2f11ef089b fix rustdoc uploads 2020-11-24 00:29:13 +00:00
4100c50c70 add either extractor (#1788) 2020-11-20 18:02:41 +00:00
a929209967 actix-files intra-doc migration (#1785) 2020-11-10 23:54:38 +00:00
49e945c88f switching to nightly for intra-doc links (#1783) 2020-11-09 14:01:36 +00:00
9b42333fac Fix typo in Query extractor docs (#1777) 2020-11-06 13:34:42 +00:00
e5b86d189c Fix typo in request_data.rs (#1774) 2020-11-05 17:46:17 +00:00
4bfd5c2781 Upgrade serde_urlencoded to 0.7 (#1773) 2020-11-06 01:36:15 +09:00
9b6a089b36 fix awc doc example (#1772)
* fix awc readme example

Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-11-05 06:20:01 +08:00
ceac97bb8d Update config.yml 2020-11-04 15:08:12 +00:00
61b65aa64a add common 1xx http response builders (#1768)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-11-02 18:23:18 +09:00
5468c3c410 Drop content length headers from 101 responses (#1767)
Co-authored-by: Sebastian Mayr <smayr@atlassian.com>
2020-11-02 17:44:14 +09:00
b6385c2b4e Remove CoC on actix-http as duplicated 2020-10-31 12:12:19 +09:00
5135c1e3a0 Update CoC contact information 2020-10-31 12:06:51 +09:00
22b451cf2d fix deps.rs badge 2020-10-31 02:39:54 +00:00
42f51eb962 prepare web release 3.2.0 2020-10-30 03:15:22 +00:00
156c97cef2 prepare awc release 2.0.1 2020-10-30 02:50:53 +00:00
798d744eef prepare http release 2.1.0 2020-10-30 02:19:56 +00:00
4cb833616a deprecate builder if-x methods (#1760) 2020-10-30 02:10:05 +00:00
9963a5ef54 expose on_connect v2 (#1754)
Co-authored-by: Mikail Bagishov <bagishov.mikail@yandex.ru>
2020-10-30 02:03:26 +00:00
4519db36b2 register fns for custom request-derived logging units (#1749)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-10-29 18:38:49 +00:00
7030bf5fe8 Adding app_data to ServiceConfig (#1758)
Co-authored-by: Rob Ede <robjtede@icloud.com>
Co-authored-by: Augusto <augusto@flowciety.de>
2020-10-26 17:02:45 +00:00
20078fe603 Bump pin-project to 1.0 (#1733) 2020-10-25 19:41:44 +09:00
06e5042b94 use identity encoding on client if no compression features are enabled (#1737)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-10-24 21:15:01 +01:00
41e7cec72f Re-export bytes::Buf and bytes::BufMut as well (#1750)
Co-authored-by: Daniel Egger <daniel.egger@axiros.com>
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-10-24 20:31:23 +01:00
d45a1aa6b6 Add web::ReqData<T> extractor (#1748)
Co-authored-by: Jonas Platte <jonas@lumeo.com>
2020-10-24 18:49:50 +01:00
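A sketch of the new extractor, assuming some middleware has already inserted a cloneable value into the request's extensions (the `RequestId` type is illustrative):

```rust
use actix_web::{web, HttpResponse, Responder};

// Illustrative request-local value; normally inserted by middleware via
// `HttpMessage::extensions_mut().insert(..)` before the handler runs.
#[derive(Clone)]
struct RequestId(u64);

async fn handler(id: Option<web::ReqData<RequestId>>) -> impl Responder {
    match id {
        Some(id) => HttpResponse::Ok().body(format!("request id: {}", id.0)),
        None => HttpResponse::NotFound().body("no request id attached"),
    }
}
```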
98243db9f1 Print unconfigured Data<T> type when attempting extraction (#1743)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-10-20 17:35:34 +01:00
f92742bdac Bump base64 to 0.13 (#1744) 2020-10-19 18:24:22 +01:00
e563025b16 always construct shortslice using debug checked new constructor (#1741) 2020-10-19 12:51:30 +01:00
cfd5b381f1 Implement Logger middleware regex exclude pattern (#1723)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-10-19 07:18:16 +01:00
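A minimal sketch of the new exclusion option; the pattern shown is illustrative:

```rust
use actix_web::{middleware::Logger, App};

fn main() {
    // Skip access-log lines for any path matching the regex, e.g. health probes.
    let logger = Logger::default().exclude_regex("^/health");
    let _app = App::new().wrap(logger);
}
```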
2f84914146 Skip some tests that cause ICE on nightly (#1740) 2020-10-19 11:52:05 +09:00
d765e9099d Fix clippy::rc_buffer (#1728) 2020-10-10 09:26:05 +09:00
34b23f31c9 prepare files release 0.4.0 2020-10-06 22:08:33 +01:00
26c1a901d9 add files preference for utf8 text responses (#1714) 2020-10-06 21:56:28 +01:00
c2c71cc626 Fix/suppress clippy warnings (#1720) 2020-10-01 18:19:09 +09:00
aa11231ee5 prepare web release 3.1.0 (#1716) 2020-09-30 11:07:35 +01:00
b5812b15f0 Remove Sized Bound for web::Data (#1712) 2020-09-29 22:44:12 +01:00
b4e02fe29a Fix cyclic references in ResourceMap (#1708) 2020-09-25 17:42:49 +01:00
37c76a39ab Fix Multipart consuming payload before header checks (#1704)
* Fix Multipart consuming payload before header checks

What
--
Split up logic in the constructor into two functions:

- **from_boundary:** build Multipart from boundary and stream
- **from_error:** build Multipart for MultipartError

Also we make the `boundary`, `from_boundary`, `from_error`  methods public within the crate so that we can use them in the extractor.

The extractor is then able to perform header checks and only consume the
payload if the checks pass.

* Add tests

* Add payload consumption test

Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-09-25 14:50:37 +01:00
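From a handler's point of view, the effect is that a request with a missing or invalid multipart Content-Type now fails at extraction time instead of after the payload has been read. A rough sketch (assumes the futures-util crate for `StreamExt`):

```rust
use actix_multipart::Multipart;
use actix_web::{HttpResponse, Responder};
use futures_util::StreamExt as _;

async fn upload(mut payload: Multipart) -> impl Responder {
    // Fields are only pulled from the payload once the header checks have passed.
    while let Some(field) = payload.next().await {
        match field {
            Ok(_field) => { /* process the field's chunks here */ }
            Err(err) => return HttpResponse::BadRequest().body(err.to_string()),
        }
    }
    HttpResponse::Ok().finish()
}
```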
60e7e52276 Add TrailingSlash::MergeOnly behavior (#1695)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-09-25 12:50:59 +01:00
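A minimal sketch of selecting the new behaviour:

```rust
use actix_web::{
    middleware::{NormalizePath, TrailingSlash},
    App,
};

fn main() {
    // Merge repeated slashes but leave trailing slashes untouched.
    let _app = App::new().wrap(NormalizePath::new(TrailingSlash::MergeOnly));
}
```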
c53e9468bc prepare codegen 0.4.0 release (#1702) 2020-09-24 23:54:01 +01:00
162121bf8d Unify route macros (#1705) 2020-09-22 22:42:51 +01:00
f7bcad9567 split up files lib (#1685) 2020-09-20 23:18:25 +01:00
f9e3f78e45 remove non-relevant comment from actix-http README.md (#1701) 2020-09-20 17:21:53 +01:00
1596893ef7 update actix-http dev-dependencies (#1696)
Co-authored-by: luojinming <luojm@hxsmart.com>
2020-09-19 23:20:34 +09:00
2a2474ca09 Update tinyvec to 1.0 (#1689) 2020-09-17 18:09:42 +01:00
509b2e6eec Provide attribute macro for multiple HTTP methods (#1674)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-09-16 22:37:41 +01:00
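A sketch of the attribute macro this adds (path and handler are illustrative):

```rust
use actix_web::{route, HttpResponse, Responder};

// One handler registered for several methods through a single `route` attribute.
#[route("/item", method = "GET", method = "HEAD")]
async fn item() -> impl Responder {
    HttpResponse::Ok().finish()
}
```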
d707704556 prepare web release 3.0.2 (#1681) 2020-09-15 13:14:14 +01:00
a429ee6646 Add possibility to set address for test_server (#1645) 2020-09-15 12:09:16 +01:00
7f8073233a fix trimming to inaccessible root path (#1678) 2020-09-15 11:32:31 +01:00
4b4c9d1b93 update migration guide
closes #1680
2020-09-14 22:26:03 +01:00
3fde3be3d8 add trybuild tests to routing codegen (#1677) 2020-09-13 16:31:08 +01:00
f861508789 prepare web release 3.0.1 (#1676) 2020-09-13 03:24:44 +01:00
a4546f02d2 make TrailingSlash enum accessible (#1673)
Co-authored-by: Damian Lesiuk <lesiuk@sabre.com>
2020-09-13 00:55:39 +01:00
367 changed files with 45845 additions and 26958 deletions

.cargo/config.toml (new file, 14 lines)

@@ -0,0 +1,14 @@
[alias]
lint = "clippy --workspace --tests --examples --bins -- -Dclippy::todo"
lint-all = "clippy --workspace --all-features --tests --examples --bins -- -Dclippy::todo"
# lib checking
ci-check-min = "hack --workspace check --no-default-features"
ci-check-default = "hack --workspace check"
ci-check-default-tests = "check --workspace --tests"
ci-check-all-feature-powerset="hack --workspace --feature-powerset --skip=__compress,io-uring check"
ci-check-all-feature-powerset-linux="hack --workspace --feature-powerset --skip=__compress check"
# testing
ci-doctest-default = "test --workspace --doc --no-fail-fast -- --nocapture"
ci-doctest = "test --workspace --all-features --doc --no-fail-fast -- --nocapture"

.github/FUNDING.yml

@ -0,0 +1,3 @@
# These are supported funding model platforms
github: [robjtede]


@ -33,5 +33,5 @@ Please search on the [Actix Web issue tracker](https://github.com/actix/actix-we
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Rust Version (I.e, output of `rustc -V`):
* Actix Web Version:
- Rust Version (I.e, output of `rustc -V`):
- Actix Web Version:


@ -1,8 +1,8 @@
blank_issues_enabled: true
contact_links:
- name: Gitter channel (actix-web)
url: https://gitter.im/actix/actix-web
about: Please ask and answer questions about the actix-web here.
- name: Gitter channel (actix)
url: https://gitter.im/actix/actix
about: Please ask and answer questions about the actix here.
- name: Actix Discord
url: https://discord.gg/NWpN5mmg3x
about: Actix developer discussion and community chat
- name: GitHub Discussions
url: https://github.com/actix/actix-web/discussions
about: Actix Web Q&A


@ -1,21 +1,21 @@
<!-- Thanks for considering contributing actix! -->
<!-- Please fill out the following to make our reviews easy. -->
<!-- Please fill out the following to get your PR reviewed quicker. -->
## PR Type
<!-- What kind of change does this PR make? -->
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->
INSERT_PR_TYPE
PR_TYPE
## PR Checklist
Check your PR fulfills the following:
<!-- Check your PR fulfills the following items. -->
<!-- For draft PRs check the boxes as you complete them. -->
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] A changelog entry has been made for the appropriate packages.
- [ ] Format code with the latest stable rustfmt
- [ ] Format code with the latest stable rustfmt.
- [ ] (Team) Label with affected crates and semver status.
## Overview


@ -1,8 +1,6 @@
name: Benchmark (Linux)
name: Benchmark
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master

.github/workflows/ci-master.yml

@ -0,0 +1,153 @@
name: CI (master only)
on:
push:
branches: [master]
jobs:
build_and_test_nightly:
strategy:
fail-fast: false
matrix:
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-2022, triple: x86_64-pc-windows-msvc }
version:
- nightly
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env:
CI: 1
CARGO_INCREMENTAL: 0
VCPKGRS_DYNAMIC: 1
steps:
- uses: actions/checkout@v2
# install OpenSSL on Windows
# TODO: GitHub actions docs state that OpenSSL is
# already installed on these Windows machines somewhere
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check minimal
uses: actions-rs/cargo@v1
with: { command: ci-check-min }
- name: check default
uses: actions-rs/cargo@v1
with: { command: ci-check-default }
- name: tests
timeout-minutes: 60
run: |
cargo test --lib --tests -p=actix-router --all-features
cargo test --lib --tests -p=actix-http --all-features
cargo test --lib --tests -p=actix-web --features=rustls,openssl -- --skip=test_reading_deflate_encoding_large_random_rustls
cargo test --lib --tests -p=actix-web-codegen --all-features
cargo test --lib --tests -p=awc --all-features
cargo test --lib --tests -p=actix-http-test --all-features
cargo test --lib --tests -p=actix-test --all-features
cargo test --lib --tests -p=actix-files
cargo test --lib --tests -p=actix-multipart --all-features
cargo test --lib --tests -p=actix-web-actors --all-features
- name: tests (io-uring)
if: matrix.target.os == 'ubuntu-latest'
timeout-minutes: 60
run: >
sudo bash -c "ulimit -Sl 512
&& ulimit -Hl 512
&& PATH=$PATH:/usr/share/rust/.cargo/bin
&& RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo test --lib --tests -p=actix-files --all-features"
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.6.3 --no-default-features --features ci-autoclean
cargo-cache
ci_feature_powerset_check:
name: Verify Feature Combinations
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check feature combinations
uses: actions-rs/cargo@v1
with: { command: ci-check-all-feature-powerset }
- name: check feature combinations
uses: actions-rs/cargo@v1
with: { command: ci-check-all-feature-powerset-linux }
coverage:
name: coverage
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Generate coverage file
run: |
cargo install cargo-tarpaulin --vers "^0.13"
cargo tarpaulin --workspace --features=rustls,openssl --out Xml --verbose
- name: Upload to Codecov
uses: codecov/codecov-action@v1
with: { file: cobertura.xml }

.github/workflows/ci.yml

@ -0,0 +1,120 @@
name: CI
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches: [master]
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-2022, triple: x86_64-pc-windows-msvc }
version:
- 1.54.0 # MSRV
- stable
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env:
CI: 1
CARGO_INCREMENTAL: 0
VCPKGRS_DYNAMIC: 1
steps:
- uses: actions/checkout@v2
# install OpenSSL on Windows
# TODO: GitHub actions docs state that OpenSSL is
# already installed on these Windows machines somewhere
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check minimal
uses: actions-rs/cargo@v1
with: { command: ci-check-min }
- name: check default
uses: actions-rs/cargo@v1
with: { command: ci-check-default }
- name: tests
timeout-minutes: 60
run: |
cargo test --lib --tests -p=actix-router --all-features
cargo test --lib --tests -p=actix-http --all-features
cargo test --lib --tests -p=actix-web --features=rustls,openssl -- --skip=test_reading_deflate_encoding_large_random_rustls
cargo test --lib --tests -p=actix-web-codegen --all-features
cargo test --lib --tests -p=awc --all-features
cargo test --lib --tests -p=actix-http-test --all-features
cargo test --lib --tests -p=actix-test --all-features
cargo test --lib --tests -p=actix-files
cargo test --lib --tests -p=actix-multipart --all-features
cargo test --lib --tests -p=actix-web-actors --all-features
- name: tests (io-uring)
if: matrix.target.os == 'ubuntu-latest'
timeout-minutes: 60
run: >
sudo bash -c "ulimit -Sl 512
&& ulimit -Hl 512
&& PATH=$PATH:/usr/share/rust/.cargo/bin
&& RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo test --lib --tests -p=actix-files --all-features"
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.6.3 --no-default-features --features ci-autoclean
cargo-cache
rustdoc:
name: doc tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust (nightly)
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.3.0
- name: doc tests
uses: actions-rs/cargo@v1
timeout-minutes: 60
with: { command: ci-doctest }


@ -1,32 +1,48 @@
name: Lint
on:
pull_request:
types: [opened, synchronize, reopened]
name: Clippy and rustfmt Check
jobs:
clippy_check:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: rustfmt
override: true
- name: Check with rustfmt
uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
- uses: actions-rs/toolchain@v1
clippy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
toolchain: stable
profile: minimal
components: clippy
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Check with Clippy
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --all-features --all --tests
args: --workspace --tests --examples --all-features


@ -1,69 +0,0 @@
name: CI (Linux)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- 1.42.0 # MSRV
- stable
- nightly
name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
- name: tests (actix-http)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=actix-http --no-default-features --features=rustls -- --nocapture
- name: tests (awc)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=awc --no-default-features --features=rustls -- --nocapture
- name: Generate coverage file
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
run: |
cargo install cargo-tarpaulin --vers "^0.13"
cargo tarpaulin --out Xml
- name: Upload to Codecov
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
uses: codecov/codecov-action@v1
with:
file: cobertura.xml


@ -1,44 +0,0 @@
name: CI (macOS)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-apple-darwin
runs-on: macOS-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-apple-darwin
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
--skip=test_h2_content_length
--skip=test_reading_deflate_encoding_large_random_rustls


@ -1,14 +1,12 @@
name: Upload documentation
name: Upload Documentation
on:
push:
branches:
- master
branches: [master]
jobs:
build:
runs-on: ubuntu-latest
if: github.repository == 'actix/actix-web'
steps:
- uses: actions/checkout@v2
@ -16,21 +14,21 @@ jobs:
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: check build
- name: Build Docs
uses: actions-rs/cargo@v1
with:
command: doc
args: --no-deps --workspace --all-features
args: --workspace --all-features --no-deps
- name: Tweak HTML
run: echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html
run: echo '<meta http-equiv="refresh" content="0;url=actix_web/index.html">' > target/doc/index.html
- name: Deploy to GitHub Pages
uses: JamesIves/github-pages-deploy-action@3.5.8
uses: JamesIves/github-pages-deploy-action@3.7.1
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages


@ -1,64 +0,0 @@
name: CI (Windows)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
env:
VCPKGRS_DYNAMIC: 1
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-pc-windows-msvc
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-pc-windows-msvc
profile: minimal
override: true
- name: Install OpenSSL
run: |
vcpkg integrate install
vcpkg install openssl:x64-windows
Copy-Item C:\vcpkg\installed\x64-windows\bin\libcrypto-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libcrypto.dll
Copy-Item C:\vcpkg\installed\x64-windows\bin\libssl-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libssl.dll
Get-ChildItem C:\vcpkg\installed\x64-windows\bin
Get-ChildItem C:\vcpkg\installed\x64-windows\lib
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
--skip=test_h2_content_length
--skip=test_reading_deflate_encoding_large_random_rustls
--skip=test_params
--skip=test_simple
--skip=test_expect_continue
--skip=test_http10_keepalive
--skip=test_slow_request
--skip=test_connection_force_close
--skip=test_connection_server_close
--skip=test_connection_wait_queue_force_close

.gitignore

@ -13,3 +13,9 @@ guide/build/
# These are backup files generated by rustfmt
**/*.rs.bk
# Configuration directory generated by CLion
.idea
# Configuration directory generated by VSCode
.vscode

File diff suppressed because it is too large


@ -8,19 +8,19 @@ In the interest of fostering an open and welcoming environment, we as contributo
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
- The use of sexualized language or imagery and unwelcome sexual attention or advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
@ -34,10 +34,13 @@ This Code of Conduct applies both within project spaces and in public spaces whe
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at robjtede@icloud.com ([@robjtede]) or huyuumi@neet.club ([@JohnTitor]). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
[@robjtede]: https://github.com/robjtede
[@JohnTitor]: https://github.com/JohnTitor
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]


@ -1,115 +1,131 @@
[package]
name = "actix-web"
version = "3.0.0"
version = "4.0.0-beta.20"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust."
readme = "README.md"
description = "Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust"
keywords = ["actix", "http", "web", "framework", "async"]
categories = [
"network-programming",
"asynchronous",
"web-programming::http-server",
"web-programming::websocket"
]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-web/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
license = "MIT OR Apache-2.0"
edition = "2018"
[package.metadata.docs.rs]
features = ["openssl", "rustls", "compress", "secure-cookies"]
[badges]
travis-ci = { repository = "actix/actix-web", branch = "master" }
codecov = { repository = "actix/actix-web", branch = "master", service = "github" }
# features that docs.rs will build with
features = ["openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd", "cookies", "secure-cookies"]
rustdoc-args = ["--cfg", "docsrs"]
[lib]
name = "actix_web"
path = "src/lib.rs"
[workspace]
resolver = "2"
members = [
".",
"awc",
"actix-http",
"actix-files",
"actix-multipart",
"actix-web-actors",
"actix-web-codegen",
"test-server",
".",
"actix-files",
"actix-http-test",
"actix-http",
"actix-multipart",
"actix-router",
"actix-test",
"actix-web-actors",
"actix-web-codegen",
"awc",
]
[features]
default = ["compress"]
default = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"]
# content-encoding support
compress = ["actix-http/compress", "awc/compress"]
# Brotli algorithm content-encoding support
compress-brotli = ["actix-http/compress-brotli", "__compress"]
# Gzip and deflate algorithms content-encoding support
compress-gzip = ["actix-http/compress-gzip", "__compress"]
# Zstd algorithm content-encoding support
compress-zstd = ["actix-http/compress-zstd", "__compress"]
# sessions feature
secure-cookies = ["actix-http/secure-cookies"]
# support for cookies
cookies = ["cookie"]
# secure cookies feature
secure-cookies = ["cookie/secure"]
# openssl
openssl = ["actix-tls/openssl", "awc/openssl", "open-ssl"]
openssl = ["actix-http/openssl", "actix-tls/accept", "actix-tls/openssl"]
# rustls
rustls = ["actix-tls/rustls", "awc/rustls", "rust-tls"]
rustls = ["actix-http/rustls", "actix-tls/accept", "actix-tls/rustls"]
[[example]]
name = "basic"
required-features = ["compress"]
# Internal (PRIVATE!) features used to aid testing and checking feature status.
# Don't rely on these whatsoever. They may disappear at anytime.
__compress = []
[[example]]
name = "uds"
required-features = ["compress"]
[[test]]
name = "test_server"
required-features = ["compress"]
# io-uring feature only available for Linux OSes.
experimental-io-uring = ["actix-server/io-uring"]
[dependencies]
actix-codec = "0.3.0"
actix-service = "1.0.6"
actix-utils = "2.0.0"
actix-router = "0.2.4"
actix-rt = "1.1.1"
actix-server = "1.0.0"
actix-testing = "1.0.0"
actix-macros = "0.1.0"
actix-threadpool = "0.3.1"
actix-tls = "2.0.0"
actix-codec = "0.4.1"
actix-macros = "0.2.3"
actix-rt = "2.6"
actix-server = "2"
actix-service = "2.0.0"
actix-utils = "3.0.0"
actix-tls = { version = "3.0.0", default-features = false, optional = true }
actix-web-codegen = "0.3.0"
actix-http = "2.0.0"
awc = { version = "2.0.0", default-features = false }
actix-http = "3.0.0-beta.18"
actix-router = "0.5.0-rc.2"
actix-web-codegen = "0.5.0-rc.1"
bytes = "0.5.3"
derive_more = "0.99.2"
ahash = "0.7"
bytes = "1"
cfg-if = "1"
cookie = { version = "0.16", features = ["percent-encode"], optional = true }
derive_more = "0.99.5"
encoding_rs = "0.8"
futures-channel = { version = "0.3.5", default-features = false }
futures-core = { version = "0.3.5", default-features = false }
futures-util = { version = "0.3.5", default-features = false }
fxhash = "0.2.1"
futures-core = { version = "0.3.7", default-features = false }
futures-util = { version = "0.3.7", default-features = false }
itoa = "1"
language-tags = "0.3"
once_cell = "1.5"
log = "0.4"
mime = "0.3"
socket2 = "0.3"
pin-project = "0.4.17"
regex = "1.3"
pin-project-lite = "0.2.7"
regex = "1.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_urlencoded = "0.6.1"
time = { version = "0.2.7", default-features = false, features = ["std"] }
serde_urlencoded = "0.7"
smallvec = "1.6.1"
socket2 = "0.4.0"
time = { version = "0.3", default-features = false, features = ["formatting"] }
url = "2.1"
open-ssl = { package = "openssl", version = "0.10", optional = true }
rust-tls = { package = "rustls", version = "0.18.0", optional = true }
tinyvec = { version = "0.3", features = ["alloc"] }
[dev-dependencies]
actix = "0.10.0"
actix-http = { version = "2.0.0-beta.4", features = ["actors"] }
rand = "0.7"
env_logger = "0.7"
serde_derive = "1.0"
brotli2 = "0.3.2"
actix-files = "0.6.0-beta.14"
actix-test = { version = "0.1.0-beta.11", features = ["openssl", "rustls"] }
awc = { version = "3.0.0-beta.18", features = ["openssl"] }
brotli = "3.3.3"
const-str = "0.3"
criterion = { version = "0.3", features = ["html_reports"] }
env_logger = "0.9"
flate2 = "1.0.13"
criterion = "0.3"
futures-util = { version = "0.3.7", default-features = false, features = ["std"] }
rand = "0.8"
rcgen = "0.8"
rustls-pemfile = "0.2"
static_assertions = "1"
tls-openssl = { package = "openssl", version = "0.10.9" }
tls-rustls = { package = "rustls", version = "0.20.0" }
zstd = "0.9"
[profile.dev]
# Disabling debug info speeds up builds a bunch and we don't rely on it for debugging that much.
debug = 0
[profile.release]
lto = true
@ -117,17 +133,45 @@ opt-level = 3
codegen-units = 1
[patch.crates-io]
actix-web = { path = "." }
actix-http = { path = "actix-http" }
actix-http-test = { path = "test-server" }
actix-web-codegen = { path = "actix-web-codegen" }
actix-files = { path = "actix-files" }
actix-http = { path = "actix-http" }
actix-http-test = { path = "actix-http-test" }
actix-multipart = { path = "actix-multipart" }
actix-router = { path = "actix-router" }
actix-test = { path = "actix-test" }
actix-web = { path = "." }
actix-web-actors = { path = "actix-web-actors" }
actix-web-codegen = { path = "actix-web-codegen" }
awc = { path = "awc" }
# uncomment for quick testing against local actix-net repo
# actix-service = { path = "../actix-net/actix-service" }
# actix-macros = { path = "../actix-net/actix-macros" }
# actix-rt = { path = "../actix-net/actix-rt" }
# actix-codec = { path = "../actix-net/actix-codec" }
# actix-utils = { path = "../actix-net/actix-utils" }
# actix-tls = { path = "../actix-net/actix-tls" }
# actix-server = { path = "../actix-net/actix-server" }
[[test]]
name = "test_server"
required-features = ["compress-brotli", "compress-gzip", "compress-zstd", "cookies"]
[[test]]
name = "compression"
required-features = ["compress-brotli", "compress-gzip", "compress-zstd"]
[[example]]
name = "client"
required-features = ["rustls"]
name = "basic"
required-features = ["compress-gzip"]
[[example]]
name = "uds"
required-features = ["compress-gzip"]
[[example]]
name = "on-connect"
required-features = []
[[bench]]
name = "server"
@ -136,3 +180,7 @@ harness = false
[[bench]]
name = "service"
harness = false
[[bench]]
name = "responder"
harness = false


@ -1,4 +1,4 @@
Copyright (c) 2017 Actix Team
Copyright (c) 2017-NOW Actix Team
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated


@ -1,21 +1,57 @@
## Unreleased
- The default `NormalizePath` behavior now strips trailing slashes. This was previously
documented to be the case in v3, but the behavior now matches. The effect is that routes
defined with trailing slashes become inaccessible when
using `NormalizePath::default()`. As such, calling `NormalizePath::default()` will log a warning.
It is advised that the `new` method be used instead.
Before: `#[get("/test/")]`
After: `#[get("/test")]`
Alternatively, explicitly require trailing slashes: `NormalizePath::new(TrailingSlash::Always)`; a short sketch follows at the end of this section.
- The `type Config` of `FromRequest` was removed.
- Feature flag `compress` has been split into its supported algorithm (brotli, gzip, zstd).
By default all compression algorithms are enabled.
To select algorithm you want to include with `middleware::Compress` use following flags:
- `compress-brotli`
- `compress-gzip`
- `compress-zstd`
If you have set dedicated `actix-web` features in your `Cargo.toml` and you still want
compression enabled, change the feature selection as below:
Before: `"compress"`
After: `"compress-brotli", "compress-gzip", "compress-zstd"`
## 3.0.0
* Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now
- The return type for `ServiceRequest::app_data::<T>()` was changed from returning a `Data<T>` to
simply a `T`. To access a `Data<T>` use `ServiceRequest::app_data::<Data<T>>()`.
- Cookie handling has been offloaded to the `cookie` crate:
* `USERINFO_ENCODE_SET` is no longer exposed. Percent-encoding is still supported; check docs.
* Some types now require lifetime parameters.
- The time crate was updated to `v0.2`, a major breaking change, which affects any `actix-web`
method previously expecting a time v0.1 input.
- Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now
result in `SameSite=None` being sent with the response Set-Cookie header.
To create a cookie without a SameSite attribute, remove any calls setting same_site.
* actix-http support for Actors messages was moved to actix-http crate and is enabled
- actix-http support for Actors messages was moved to actix-http crate and is enabled
with feature `actors`
* content_length function is removed from actix-http.
- content_length function is removed from actix-http.
You can set Content-Length by normally setting the response body or calling no_chunking function.
* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
- `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
`u64` instead of a `usize`.
* Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`s elements now needs to use
- Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`s elements now needs to use
destructuring or `.into_inner()`. For example:
```rust
@ -35,35 +71,35 @@
}
```
* `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one.
- `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one.
It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`,
or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::default())`.
or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::new(TrailingSlash::MergeOnly))`.
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`.
- `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`.
* `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
- `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
## 2.0.0
* `HttpServer::start()` renamed to `HttpServer::run()`. It is also possible to
- `HttpServer::start()` renamed to `HttpServer::run()`. It is also possible to
`.await` the `run` method result; in that case it awaits server exit.
* `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`.
- `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`.
Stored data is available via `HttpRequest::app_data()` method at runtime.
* Extractor configuration must be registered with `App::app_data()` instead of `App::data()`
- Extractor configuration must be registered with `App::app_data()` instead of `App::data()`
* Sync handlers have been removed. The `.to_async()` method has been renamed to `.to()`;
- Sync handlers have been removed. The `.to_async()` method has been renamed to `.to()`;
replace `fn` with `async fn` to convert a sync handler to async.
* `actix_http_test::TestServer` moved to `actix_web::test` module. To start
- `actix_http_test::TestServer` moved to `actix_web::test` module. To start
test server use `test::start()` or `test_start_with_config()` methods
* `ResponseError` trait has been refactored. `ResponseError::error_response()` renders an
- `ResponseError` trait has been refactored. `ResponseError::error_response()` renders an
http response.
* Feature `rust-tls` renamed to `rustls`
- Feature `rust-tls` renamed to `rustls`
instead of
@ -77,7 +113,7 @@
actix-web = { version = "2.0.0", features = ["rustls"] }
```
* Feature `ssl` renamed to `openssl`
- Feature `ssl` renamed to `openssl`
instead of
@ -90,11 +126,11 @@
```rust
actix-web = { version = "2.0.0", features = ["openssl"] }
```
* `Cors` builder now requires that you call `.finish()` to construct the middleware
- `Cors` builder now requires that you call `.finish()` to construct the middleware
## 1.0.1
* Cors middleware has been moved to `actix-cors` crate
- Cors middleware has been moved to `actix-cors` crate
instead of
@ -108,7 +144,7 @@
use actix_cors::Cors;
```
* Identity middleware has been moved to `actix-identity` crate
- Identity middleware has been moved to `actix-identity` crate
instead of
@ -125,7 +161,7 @@
## 1.0.0
* Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration
- Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration
instead of
@ -183,7 +219,7 @@
)
```
* Resource registration. 1.0 version uses generalized resource
- Resource registration. 1.0 version uses generalized resource
registration via `.service()` method.
instead of
@ -203,7 +239,7 @@
.route(web::post().to(post_handler))
```
* Scope registration.
- Scope registration.
instead of
@ -227,7 +263,7 @@
);
```
* `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
- `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
instead of
@ -241,7 +277,7 @@
App.new().service(web::resource("/welcome").to(welcome))
```
* Passing arguments to handler with extractors, multiple arguments are allowed
- Passing arguments to handler with extractors, multiple arguments are allowed
instead of
@ -259,7 +295,7 @@
}
```
* `.f()`, `.a()` and `.h()` handler registration methods have been removed.
- `.f()`, `.a()` and `.h()` handler registration methods have been removed.
Use `.to()` for handlers and `.to_async()` for async handlers. Handler function
must use extractors.
@ -275,7 +311,7 @@
App.new().service(web::resource("/welcome").to(welcome))
```
* `HttpRequest` does not provide access to request's payload stream.
- `HttpRequest` does not provide access to request's payload stream.
instead of
@ -305,7 +341,7 @@
}
```
* `State` is now `Data`. You register Data during the App initialization process
- `State` is now `Data`. You register Data during the App initialization process
and then access it from handlers either using a Data extractor or using
HttpRequest's api.
@ -341,7 +377,7 @@
```
* AsyncResponder is removed, use `.to_async()` registration method and `impl Future<>` as result type.
- AsyncResponder is removed, use `.to_async()` registration method and `impl Future<>` as result type.
instead of
@ -357,7 +393,7 @@
.. simply omit AsyncResponder and the corresponding responder() finish method
* Middleware
- Middleware
instead of
@ -374,7 +410,7 @@
.route("/index.html", web::get().to(index));
```
* `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
- `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
method have been removed. Use `Bytes`, `String`, `Form`, `Json`, `Multipart` extractors instead.
instead of
@ -396,9 +432,9 @@
}
```
* `actix_web::server` module has been removed. To start http server use `actix_web::HttpServer` type
- `actix_web::server` module has been removed. To start http server use `actix_web::HttpServer` type
* StaticFiles and NamedFile have been moved to a separate crate.
- StaticFiles and NamedFile have been moved to a separate crate.
instead of `use actix_web::fs::StaticFile`
@ -408,20 +444,20 @@
use `use actix_files::NamedFile`
* Multipart has been moved to a separate crate.
- Multipart has been moved to a separate crate.
instead of `use actix_web::multipart::Multipart`
use `use actix_multipart::Multipart`
* Response compression is not enabled by default.
- Response compression is not enabled by default.
To enable, use `Compress` middleware, `App::new().wrap(Compress::default())`.
* Session middleware moved to actix-session crate
- Session middleware moved to actix-session crate
* Actors support have been moved to `actix-web-actors` crate
- Actors support have been moved to `actix-web-actors` crate
* Custom Error
- Custom Error
Instead of the error_response method alone, ResponseError now provides two methods: error_response and render_response. error_response creates the error response, and render_response returns the error response to the caller.
@ -435,7 +471,7 @@
## 0.7.15
* The `' '` character is not percent decoded anymore before matching routes. If you need to use it in
- The `' '` character is not percent decoded anymore before matching routes. If you need to use it in
your routes, you should use `%20`.
instead of
@ -460,18 +496,18 @@
}
```
* If you used `AsyncResult::async` you need to replace it with `AsyncResult::future`
- If you used `AsyncResult::async` you need to replace it with `AsyncResult::future`
## 0.7.4
* `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as a tuple,
- `Route::with_config()`/`Route::with_async_config()` always passes configuration objects as a tuple,
even for handlers with one parameter.
## 0.7
* `HttpRequest` does not implement `Stream` anymore. If you need to read request payload
- `HttpRequest` does not implement `Stream` anymore. If you need to read request payload
use `HttpMessage::payload()` method.
instead of
@ -497,10 +533,10 @@
}
```
* [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html)
- [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html)
trait uses `&HttpRequest` instead of `&mut HttpRequest`.
* Removed `Route::with2()` and `Route::with3()` use tuple of extractors instead.
- Removed `Route::with2()` and `Route::with3()` use tuple of extractors instead.
instead of
@ -514,17 +550,17 @@
fn index((query, json): (Query<..>, Json<MyStruct)) -> impl Responder {}
```
* `Handler::handle()` uses `&self` instead of `&mut self`
- `Handler::handle()` uses `&self` instead of `&mut self`
* `Handler::handle()` accepts reference to `HttpRequest<_>` instead of value
- `Handler::handle()` accepts reference to `HttpRequest<_>` instead of value
* Removed deprecated `HttpServer::threads()`, use
- Removed deprecated `HttpServer::threads()`, use
[HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead.
* Renamed `client::ClientConnectorError::Connector` to
- Renamed `client::ClientConnectorError::Connector` to
`client::ClientConnectorError::Resolver`
* `Route::with()` does not return `ExtractorConfig`, to configure
- `Route::with()` does not return `ExtractorConfig`, to configure
extractor use `Route::with_config()`
instead of
@ -553,26 +589,26 @@
}
```
* `Route::with_async()` does not return `ExtractorConfig`, to configure
- `Route::with_async()` does not return `ExtractorConfig`, to configure
extractor use `Route::with_async_config()`
## 0.6
* `Path<T>` extractor returns `ErrorNotFound` on failure instead of `ErrorBadRequest`
- `Path<T>` extractor returns `ErrorNotFound` on failure instead of `ErrorBadRequest`
* `ws::Message::Close` now includes optional close reason.
- `ws::Message::Close` now includes optional close reason.
`ws::CloseCode::Status` and `ws::CloseCode::Empty` have been removed.
* `HttpServer::threads()` renamed to `HttpServer::workers()`.
- `HttpServer::threads()` renamed to `HttpServer::workers()`.
* `HttpServer::start_ssl()` and `HttpServer::start_tls()` deprecated.
- `HttpServer::start_ssl()` and `HttpServer::start_tls()` deprecated.
Use `HttpServer::bind_ssl()` and `HttpServer::bind_tls()` instead.
* `HttpRequest::extensions()` returns read only reference to the request's Extension
- `HttpRequest::extensions()` returns read only reference to the request's Extension
`HttpRequest::extensions_mut()` returns mutable reference.
* Instead of
- Instead of
`use actix_web::middleware::{
CookieSessionBackend, CookieSessionError, RequestSession,
@ -583,15 +619,15 @@
`use actix_web::middleware::session{CookieSessionBackend, CookieSessionError,
RequestSession, Session, SessionBackend, SessionImpl, SessionStorage};`
* `FromRequest::from_request()` accepts mutable reference to a request
- `FromRequest::from_request()` accepts mutable reference to a request
* `FromRequest::Result` has to implement `Into<Reply<Self>>`
- `FromRequest::Result` has to implement `Into<Reply<Self>>`
* [`Responder::respond_to()`](
- [`Responder::respond_to()`](
https://actix.rs/actix-web/actix_web/trait.Responder.html#tymethod.respond_to)
is generic over `S`
* Use `Query` extractor instead of `HttpRequest::query()`.
- Use `Query` extractor instead of `HttpRequest::query()`.
```rust
fn index(q: Query<HashMap<String, String>>) -> Result<..> {
@ -605,37 +641,37 @@
let q = Query::<HashMap<String, String>>::extract(req);
```
* Websocket operations are implemented as `WsWriter` trait.
- Websocket operations are implemented as `WsWriter` trait.
you need to use `use actix_web::ws::WsWriter`
## 0.5
* `HttpResponseBuilder::body()`, `.finish()`, `.json()`
- `HttpResponseBuilder::body()`, `.finish()`, `.json()`
methods return `HttpResponse` instead of `Result<HttpResponse>`
* `actix_web::Method`, `actix_web::StatusCode`, `actix_web::Version`
- `actix_web::Method`, `actix_web::StatusCode`, `actix_web::Version`
moved to `actix_web::http` module
* `actix_web::header` moved to `actix_web::http::header`
- `actix_web::header` moved to `actix_web::http::header`
* `NormalizePath` moved to `actix_web::http` module
- `NormalizePath` moved to `actix_web::http` module
* `HttpServer` moved to `actix_web::server`, added new `actix_web::server::new()` function,
- `HttpServer` moved to `actix_web::server`, added new `actix_web::server::new()` function,
shortcut for `actix_web::server::HttpServer::new()`
* `DefaultHeaders` middleware does not use separate builder, all builder methods moved to type itself
- `DefaultHeaders` middleware does not use separate builder, all builder methods moved to type itself
* `StaticFiles::new()`'s show_index parameter removed, use `show_files_listing()` method instead.
- `StaticFiles::new()`'s show_index parameter removed, use `show_files_listing()` method instead.
* `CookieSessionBackendBuilder` removed, all methods moved to `CookieSessionBackend` type
- `CookieSessionBackendBuilder` removed, all methods moved to `CookieSessionBackend` type
* `actix_web::httpcodes` module is deprecated, `HttpResponse::Ok()`, `HttpResponse::Found()` and other `HttpResponse::XXX()`
- `actix_web::httpcodes` module is deprecated, `HttpResponse::Ok()`, `HttpResponse::Found()` and other `HttpResponse::XXX()`
functions should be used instead
* `ClientRequestBuilder::body()` returns `Result<_, actix_web::Error>`
- `ClientRequestBuilder::body()` returns `Result<_, actix_web::Error>`
instead of `Result<_, http::Error>`
* `Application` renamed to `App`
- `Application` renamed to `App`
* `actix_web::Reply`, `actix_web::Resource` moved to `actix_web::dev`
- `actix_web::Reply`, `actix_web::Resource` moved to `actix_web::dev`


@ -1,45 +1,45 @@
<div align="center">
<h1>Actix web</h1>
<h1>Actix Web</h1>
<p>
<strong>Actix web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
<strong>Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
</p>
<p>
[![crates.io](https://meritbadge.herokuapp.com/actix-web)](https://crates.io/crates/actix-web)
[![Documentation](https://docs.rs/actix-web/badge.svg)](https://docs.rs/actix-web)
[![Version](https://img.shields.io/badge/rustc-1.42+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.42.html)
![License](https://img.shields.io/crates/l/actix-web.svg)
[![crates.io](https://img.shields.io/crates/v/actix-web?label=latest)](https://crates.io/crates/actix-web)
[![Documentation](https://docs.rs/actix-web/badge.svg?version=4.0.0-beta.20)](https://docs.rs/actix-web/4.0.0-beta.20)
![MSRV](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-web.svg)
[![Dependency Status](https://deps.rs/crate/actix-web/4.0.0-beta.20/status.svg)](https://deps.rs/crate/actix-web/4.0.0-beta.20)
<br />
[![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web)
[![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web)
[![Download](https://img.shields.io/crates/d/actix-web.svg)](https://crates.io/crates/actix-web)
[![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![CI](https://github.com/actix/actix-web/actions/workflows/ci.yml/badge.svg)](https://github.com/actix/actix-web/actions/workflows/ci.yml)
[![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web)
![downloads](https://img.shields.io/crates/d/actix-web.svg)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
</p>
</div>
## Features
* Supports *HTTP/1.x* and *HTTP/2*
* Streaming and pipelining
* Keep-alive and slow requests handling
* Client/server [WebSockets](https://actix.rs/docs/websockets/) support
* Transparent content compression/decompression (br, gzip, deflate)
* Powerful [request routing](https://actix.rs/docs/url-dispatch/)
* Multipart streams
* Static assets
* SSL support using OpenSSL or Rustls
* Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
* Includes an async [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
* Supports [Actix actor framework](https://github.com/actix/actix)
* Runs on stable Rust 1.42+
- Supports *HTTP/1.x* and *HTTP/2*
- Streaming and pipelining
- Keep-alive and slow requests handling
- Client/server [WebSockets](https://actix.rs/docs/websockets/) support
- Transparent content compression/decompression (br, gzip, deflate, zstd)
- Powerful [request routing](https://actix.rs/docs/url-dispatch/)
- Multipart streams
- Static assets
- SSL support using OpenSSL or Rustls
- Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
- Includes an async [HTTP client](https://docs.rs/awc/)
- Runs on stable Rust 1.54+
## Documentation
* [Website & User Guide](https://actix.rs)
* [Examples Repository](https://github.com/actix/examples)
* [API Documentation](https://docs.rs/actix-web)
* [API Documentation (master branch)](https://actix.rs/actix-web/actix_web)
- [Website & User Guide](https://actix.rs)
- [Examples Repository](https://github.com/actix/examples)
- [API Documentation](https://docs.rs/actix-web)
- [API Documentation (master branch)](https://actix.rs/actix-web/actix_web)
## Example
@ -71,18 +71,18 @@ async fn main() -> std::io::Result<()> {
### More examples
* [Basic Setup](https://github.com/actix/examples/tree/master/basics/)
* [Application State](https://github.com/actix/examples/tree/master/state/)
* [JSON Handling](https://github.com/actix/examples/tree/master/json/)
* [Multipart Streams](https://github.com/actix/examples/tree/master/multipart/)
* [Diesel Integration](https://github.com/actix/examples/tree/master/diesel/)
* [r2d2 Integration](https://github.com/actix/examples/tree/master/r2d2/)
* [Simple WebSocket](https://github.com/actix/examples/tree/master/websocket/)
* [Tera Templates](https://github.com/actix/examples/tree/master/template_tera/)
* [Askama Templates](https://github.com/actix/examples/tree/master/template_askama/)
* [HTTPS using Rustls](https://github.com/actix/examples/tree/master/rustls/)
* [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/openssl/)
* [WebSocket Chat](https://github.com/actix/examples/tree/master/websocket-chat/)
- [Basic Setup](https://github.com/actix/examples/tree/master/basics/basics/)
- [Application State](https://github.com/actix/examples/tree/master/basics/state/)
- [JSON Handling](https://github.com/actix/examples/tree/master/json/json/)
- [Multipart Streams](https://github.com/actix/examples/tree/master/forms/multipart/)
- [Diesel Integration](https://github.com/actix/examples/tree/master/database_interactions/diesel/)
- [r2d2 Integration](https://github.com/actix/examples/tree/master/database_interactions/r2d2/)
- [Simple WebSocket](https://github.com/actix/examples/tree/master/websockets/websocket/)
- [Tera Templates](https://github.com/actix/examples/tree/master/template_engines/tera/)
- [Askama Templates](https://github.com/actix/examples/tree/master/template_engines/askama/)
- [HTTPS using Rustls](https://github.com/actix/examples/tree/master/security/rustls/)
- [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/security/openssl/)
- [WebSocket Chat](https://github.com/actix/examples/tree/master/websockets/chat/)
You may consider checking out
[this directory](https://github.com/actix/examples/tree/master/) for more examples.
@ -90,20 +90,20 @@ You may consider checking out
## Benchmarks
One of the fastest web frameworks available according to the
[TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r19).
[TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r20&test=composite).
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
[http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
[http://www.apache.org/licenses/LICENSE-2.0])
- MIT license ([LICENSE-MIT](LICENSE-MIT) or
[http://opensource.org/licenses/MIT])
at your option.
## Code of Conduct
Contribution to the actix-web crate is organized under the terms of the Contributor Covenant, the
maintainers of Actix web, promises to intervene to uphold that code of conduct.
Contribution to the actix-web repo is organized under the terms of the Contributor Covenant.
The Actix team promises to intervene to uphold that code of conduct.


@ -1,90 +1,199 @@
# Changes
## [Unreleased] - 2020-xx-xx
## Unreleased - 2021-xx-xx
## [0.3.0-beta.1] - 2020-07-15
* Update `v_htmlescape` to 0.10
* Update `actix-web` and `actix-http` dependencies to beta.1
## [0.3.0-alpha.1] - 2020-05-23
* Update `actix-web` and `actix-http` dependencies to alpha
* Fix some typos in the docs
* Bump minimum supported Rust version to 1.40
* Support sending Content-Length when Content-Range is specified [#1384]
## 0.6.0-beta.14 - 2022-01-14
- The `prefer_utf8` option introduced in `0.4.0` is now true by default. [#2583]
[#2583]: https://github.com/actix/actix-web/pull/2583
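A minimal sketch of opting back out of the new default, assuming the 0.6 beta API and a hypothetical `./static` directory:

```rust
use actix_files::Files;
use actix_web::web::ServiceConfig;

// `prefer_utf8` is now enabled by default; pass `false` to restore the previous behavior.
fn static_files(cfg: &mut ServiceConfig) {
    cfg.service(Files::new("/static", "./static").prefer_utf8(false));
}
```

Plug the config in with `App::new().configure(static_files)`.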
## 0.6.0-beta.13 - 2022-01-04
- The `Files` service now rejects requests with URL paths that include `%2F` (decoded: `/`). [#2398]
- The `Files` service now correctly decodes `%25` in the URL path to `%` for the file path. [#2398]
- Minimum supported Rust version (MSRV) is now 1.54.
[#2398]: https://github.com/actix/actix-web/pull/2398
## 0.6.0-beta.12 - 2021-12-29
- No significant changes since `0.6.0-beta.11`.
## 0.6.0-beta.11 - 2021-12-27
- No significant changes since `0.6.0-beta.10`.
## 0.6.0-beta.10 - 2021-12-11
- No significant changes since `0.6.0-beta.9`.
## 0.6.0-beta.9 - 2021-11-22
- Add crate feature `experimental-io-uring`, enabling async file I/O to be utilized. This feature is only available on Linux OSes with recent kernel versions. This feature is semver-exempt. [#2408]
- Add `NamedFile::open_async`. [#2408]
- Fix 304 Not Modified responses to omit the Content-Length header, as per the spec. [#2453]
- The `Responder` impl for `NamedFile` now has a boxed future associated type. [#2408]
- The `Service` impl for `NamedFileService` now has a boxed future associated type. [#2408]
- Add `impl Clone` for `FilesService`. [#2408]
[#2408]: https://github.com/actix/actix-web/pull/2408
[#2453]: https://github.com/actix/actix-web/pull/2453
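A small sketch of the `NamedFile::open_async` addition, assuming the 0.6 beta API; the file path is illustrative:

```rust
use actix_files::NamedFile;
use actix_web::{get, Responder};

// Opens the file with the async constructor; with the `experimental-io-uring` feature
// enabled (Linux only, recent kernels) this can use io-uring under the hood.
#[get("/")]
async fn index() -> actix_web::Result<impl Responder> {
    Ok(NamedFile::open_async("./static/index.html").await?)
}
```

Register the handler as usual with `App::new().service(index)`.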
## 0.6.0-beta.8 - 2021-10-20
- Minimum supported Rust version (MSRV) is now 1.52.
## 0.6.0-beta.7 - 2021-09-09
- Minimum supported Rust version (MSRV) is now 1.51.
## 0.6.0-beta.6 - 2021-06-26
- Added `Files::path_filter()`. [#2274]
- `Files::show_files_listing()` can now be used with `Files::index_file()` to show files listing as a fallback when the index file is not found. [#2228]
[#2274]: https://github.com/actix/actix-web/pull/2274
[#2228]: https://github.com/actix/actix-web/pull/2228
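A short sketch of the index-with-listing-fallback combination described above, assuming the 0.6 beta API and an illustrative `./static` directory:

```rust
use actix_files::Files;

// Serves `index.html` when a directory contains one and falls back to a generated
// directory listing otherwise.
fn static_site() -> Files {
    Files::new("/", "./static")
        .index_file("index.html")
        .show_files_listing()
}
```

Mount it with `App::new().service(static_site())`.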
## 0.6.0-beta.5 - 2021-06-17
- `NamedFile` now implements `ServiceFactory` and `HttpServiceFactory` making it much more useful in routing. For example, it can be used directly as a default service. [#2135]
- For symbolic links, `Content-Disposition` header no longer shows the filename of the original file. [#2156]
- `Files::redirect_to_slash_directory()` now works as expected when used with `Files::show_files_listing()`. [#2225]
- `application/{javascript, json, wasm}` mime type now have `inline` disposition by default. [#2257]
[#2135]: https://github.com/actix/actix-web/pull/2135
[#2156]: https://github.com/actix/actix-web/pull/2156
[#2225]: https://github.com/actix/actix-web/pull/2225
[#2257]: https://github.com/actix/actix-web/pull/2257
## 0.6.0-beta.4 - 2021-04-02
- Add support for `.guard` in `Files` to selectively filter `Files` services. [#2046]
[#2046]: https://github.com/actix/actix-web/pull/2046
## 0.6.0-beta.3 - 2021-03-09
- No notable changes.
## 0.6.0-beta.2 - 2021-02-10
- Fix If-Modified-Since and If-Unmodified-Since to not compare using sub-second timestamps. [#1887]
- Replace `v_htmlescape` with `askama_escape`. [#1953]
[#1887]: https://github.com/actix/actix-web/pull/1887
[#1953]: https://github.com/actix/actix-web/pull/1953
## 0.6.0-beta.1 - 2021-01-07
- `HttpRange::parse` now has its own error type.
- Update `bytes` to `1.0`. [#1813]
[#1813]: https://github.com/actix/actix-web/pull/1813
## 0.5.0 - 2020-12-26
- Optionally support hidden files/directories. [#1811]
[#1811]: https://github.com/actix/actix-web/pull/1811
## 0.4.1 - 2020-11-24
- Clarify order of parameters in `Files::new` and improve docs.
## 0.4.0 - 2020-10-06
- Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714]
[#1714]: https://github.com/actix/actix-web/pull/1714
## 0.3.0 - 2020-09-11
- No significant changes from 0.3.0-beta.1.
## 0.3.0-beta.1 - 2020-07-15
- Update `v_htmlescape` to 0.10
- Update `actix-web` and `actix-http` dependencies to beta.1
## 0.3.0-alpha.1 - 2020-05-23
- Update `actix-web` and `actix-http` dependencies to alpha
- Fix some typos in the docs
- Bump minimum supported Rust version to 1.40
- Support sending Content-Length when Content-Range is specified [#1384]
[#1384]: https://github.com/actix/actix-web/pull/1384
## 0.2.1 - 2019-12-22
- Use the same format for file URLs regardless of platforms
## 0.2.0 - 2019-12-20
- Fix BodyEncoding trait import #1220
## 0.2.0-alpha.1 - 2019-12-07
- Migrate to `std::future`
## 0.1.7 - 2019-11-06
- Add an additional `filename*` param in the `Content-Disposition` header of `actix_files::NamedFile` to be more compatible. (#1151)
## 0.1.6 - 2019-10-14
- Add option to redirect to a slash-ended path `Files` #1132
## 0.1.5 - 2019-10-08
- Bump up `mime_guess` crate version to 2.0.1
- Bump up `percent-encoding` crate version to 2.1
- Allow user defined request guards for `Files` #1113
## 0.1.4 - 2019-07-20
- Allow to disable `Content-Disposition` header #686
## 0.1.3 - 2019-06-28
- Do not set `Content-Length` header, let actix-http set it #930
## 0.1.2 - 2019-06-13
- Content-Length is 0 for NamedFile HEAD request #914
- Fix ring dependency from actix-web default features for #741
## 0.1.1 - 2019-06-01
- Static files are incorrectly served as both chunked and with length #812
## 0.1.0 - 2019-05-25
- NamedFile last-modified check always fails due to nano-seconds in file modified date #820
## 0.1.0-beta.4 - 2019-05-12
- Update actix-web to beta.4
## 0.1.0-beta.1 - 2019-04-20
- Update actix-web to beta.1
## 0.1.0-alpha.6 - 2019-04-14
- Update actix-web to alpha6
## 0.1.0-alpha.4 - 2019-04-08
- Update actix-web to alpha4
## 0.1.0-alpha.2 - 2019-04-02
- Add default handler support
## 0.1.0-alpha.1 - 2019-03-28
- Initial impl

View File

@ -1,13 +1,15 @@
[package]
name = "actix-files"
version = "0.3.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Static files support for actix web."
readme = "README.md"
version = "0.6.0-beta.14"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"fakeshadow <24548779@qq.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Static file serving for Actix Web"
keywords = ["actix", "http", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-files/"
repository = "https://github.com/actix/actix-web"
categories = ["asynchronous", "web-programming::http-server"]
license = "MIT OR Apache-2.0"
edition = "2018"
@ -16,21 +18,31 @@ edition = "2018"
name = "actix_files"
path = "src/lib.rs"
[features]
experimental-io-uring = ["actix-web/experimental-io-uring", "tokio-uring"]
[dependencies]
actix-web = { version = "3.0.0", default-features = false }
actix-http = "2.0.0"
actix-service = "1.0.6"
actix-http = "3.0.0-beta.18"
actix-service = "2"
actix-utils = "3"
actix-web = { version = "4.0.0-beta.20", default-features = false }
askama_escape = "0.10"
bitflags = "1"
bytes = "0.5.3"
futures-core = { version = "0.3.5", default-features = false }
futures-util = { version = "0.3.5", default-features = false }
derive_more = "0.99.2"
bytes = "1"
derive_more = "0.99.5"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
http-range = "0.1.4"
log = "0.4"
mime = "0.3"
mime_guess = "2.0.1"
percent-encoding = "2.1"
v_htmlescape = "0.10"
pin-project-lite = "0.2.7"
tokio-uring = { version = "0.2", optional = true, features = ["bytes"] }
[dev-dependencies]
actix-rt = "1.0.0"
actix-web = { version = "3.0.0", features = ["openssl"] }
actix-rt = "2.2"
actix-test = "0.1.0-beta.11"
actix-web = "4.0.0-beta.20"
tempfile = "3.2"

View File

@ -1,9 +1,18 @@
# Static files support for actix web [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-files)](https://crates.io/crates/actix-files) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# actix-files
## Documentation & community resources
> Static file serving for Actix Web
* [User Guide](https://actix.rs/docs/)
* [API Documentation](https://docs.rs/actix-files/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-files](https://crates.io/crates/actix-files)
* Minimum supported Rust version: 1.40 or later
[![crates.io](https://img.shields.io/crates/v/actix-files?label=latest)](https://crates.io/crates/actix-files)
[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.0-beta.14)](https://docs.rs/actix-files/0.6.0-beta.14)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
![License](https://img.shields.io/crates/l/actix-files.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-files/0.6.0-beta.14/status.svg)](https://deps.rs/crate/actix-files/0.6.0-beta.14)
[![Download](https://img.shields.io/crates/d/actix-files.svg)](https://crates.io/crates/actix-files)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
## Documentation & Resources
- [API Documentation](https://docs.rs/actix-files/)
- [Example Project](https://github.com/actix/examples/tree/master/basics/static_index)
- Minimum Supported Rust Version (MSRV): 1.54
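## Example

A minimal usage sketch (the `./static` directory, index file, and port are illustrative):

```rust
use actix_files::Files;
use actix_web::{App, HttpServer};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        // serve files from `./static` under the `/static` URL prefix,
        // using `index.html` for directory requests
        App::new().service(Files::new("/static", "./static").index_file("index.html"))
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}
```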

219
actix-files/src/chunked.rs Normal file
View File

@ -0,0 +1,219 @@
use std::{
cmp, fmt,
future::Future,
io,
pin::Pin,
task::{Context, Poll},
};
use actix_web::{error::Error, web::Bytes};
use futures_core::{ready, Stream};
use pin_project_lite::pin_project;
#[cfg(feature = "experimental-io-uring")]
use bytes::BytesMut;
use super::named::File;
pin_project! {
/// Adapter to read a `std::fs::File` in chunks.
#[doc(hidden)]
pub struct ChunkedReadFile<F, Fut> {
size: u64,
offset: u64,
#[pin]
state: ChunkedReadFileState<Fut>,
counter: u64,
callback: F,
}
}
#[cfg(not(feature = "experimental-io-uring"))]
pin_project! {
#[project = ChunkedReadFileStateProj]
#[project_replace = ChunkedReadFileStateProjReplace]
enum ChunkedReadFileState<Fut> {
File { file: Option<File>, },
Future { #[pin] fut: Fut },
}
}
#[cfg(feature = "experimental-io-uring")]
pin_project! {
#[project = ChunkedReadFileStateProj]
#[project_replace = ChunkedReadFileStateProjReplace]
enum ChunkedReadFileState<Fut> {
File { file: Option<(File, BytesMut)> },
Future { #[pin] fut: Fut },
}
}
impl<F, Fut> fmt::Debug for ChunkedReadFile<F, Fut> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("ChunkedReadFile")
}
}
pub(crate) fn new_chunked_read(
size: u64,
offset: u64,
file: File,
) -> impl Stream<Item = Result<Bytes, Error>> {
ChunkedReadFile {
size,
offset,
#[cfg(not(feature = "experimental-io-uring"))]
state: ChunkedReadFileState::File { file: Some(file) },
#[cfg(feature = "experimental-io-uring")]
state: ChunkedReadFileState::File {
file: Some((file, BytesMut::new())),
},
counter: 0,
callback: chunked_read_file_callback,
}
}
#[cfg(not(feature = "experimental-io-uring"))]
async fn chunked_read_file_callback(
mut file: File,
offset: u64,
max_bytes: usize,
) -> Result<(File, Bytes), Error> {
use io::{Read as _, Seek as _};
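// seek + read below are blocking std::io calls, so they run on the blocking
// thread pool via `spawn_blocking` to avoid stalling the async workers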
let res = actix_web::rt::task::spawn_blocking(move || {
let mut buf = Vec::with_capacity(max_bytes);
file.seek(io::SeekFrom::Start(offset))?;
let n_bytes = file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?;
if n_bytes == 0 {
Err(io::Error::from(io::ErrorKind::UnexpectedEof))
} else {
Ok((file, Bytes::from(buf)))
}
})
.await
.map_err(|_| actix_web::error::BlockingError)??;
Ok(res)
}
#[cfg(feature = "experimental-io-uring")]
async fn chunked_read_file_callback(
file: File,
offset: u64,
max_bytes: usize,
mut bytes_mut: BytesMut,
) -> io::Result<(File, Bytes, BytesMut)> {
bytes_mut.reserve(max_bytes);
let (res, mut bytes_mut) = file.read_at(bytes_mut, offset).await;
let n_bytes = res?;
if n_bytes == 0 {
return Err(io::ErrorKind::UnexpectedEof.into());
}
let bytes = bytes_mut.split_to(n_bytes).freeze();
Ok((file, bytes, bytes_mut))
}
#[cfg(feature = "experimental-io-uring")]
impl<F, Fut> Stream for ChunkedReadFile<F, Fut>
where
F: Fn(File, u64, usize, BytesMut) -> Fut,
Fut: Future<Output = io::Result<(File, Bytes, BytesMut)>>,
{
type Item = Result<Bytes, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
ChunkedReadFileStateProj::File { file } => {
let size = *this.size;
let offset = *this.offset;
let counter = *this.counter;
if size == counter {
Poll::Ready(None)
} else {
let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize;
let (file, bytes_mut) = file
.take()
.expect("ChunkedReadFile polled after completion");
let fut = (this.callback)(file, offset, max_bytes, bytes_mut);
this.state
.project_replace(ChunkedReadFileState::Future { fut });
self.poll_next(cx)
}
}
ChunkedReadFileStateProj::Future { fut } => {
let (file, bytes, bytes_mut) = ready!(fut.poll(cx))?;
this.state.project_replace(ChunkedReadFileState::File {
file: Some((file, bytes_mut)),
});
*this.offset += bytes.len() as u64;
*this.counter += bytes.len() as u64;
Poll::Ready(Some(Ok(bytes)))
}
}
}
}
#[cfg(not(feature = "experimental-io-uring"))]
impl<F, Fut> Stream for ChunkedReadFile<F, Fut>
where
F: Fn(File, u64, usize) -> Fut,
Fut: Future<Output = Result<(File, Bytes), Error>>,
{
type Item = Result<Bytes, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
ChunkedReadFileStateProj::File { file } => {
let size = *this.size;
let offset = *this.offset;
let counter = *this.counter;
if size == counter {
Poll::Ready(None)
} else {
let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize;
let file = file
.take()
.expect("ChunkedReadFile polled after completion");
let fut = (this.callback)(file, offset, max_bytes);
this.state
.project_replace(ChunkedReadFileState::Future { fut });
self.poll_next(cx)
}
}
ChunkedReadFileStateProj::Future { fut } => {
let (file, bytes) = ready!(fut.poll(cx))?;
this.state
.project_replace(ChunkedReadFileState::File { file: Some(file) });
*this.offset += bytes.len() as u64;
*this.counter += bytes.len() as u64;
Poll::Ready(Some(Ok(bytes)))
}
}
}
}

View File

@ -0,0 +1,121 @@
use std::{fmt::Write, fs::DirEntry, io, path::Path, path::PathBuf};
use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse};
use askama_escape::{escape as escape_html_entity, Html};
use percent_encoding::{utf8_percent_encode, CONTROLS};
/// A directory; responds with the generated directory listing.
#[derive(Debug)]
pub struct Directory {
/// Base directory.
pub base: PathBuf,
/// Path of subdirectory to generate listing for.
pub path: PathBuf,
}
impl Directory {
/// Create a new directory
pub fn new(base: PathBuf, path: PathBuf) -> Directory {
Directory { base, path }
}
/// Is this entry visible from this directory?
pub fn is_visible(&self, entry: &io::Result<DirEntry>) -> bool {
if let Ok(ref entry) = *entry {
if let Some(name) = entry.file_name().to_str() {
if name.starts_with('.') {
return false;
}
}
if let Ok(ref md) = entry.metadata() {
let ft = md.file_type();
return ft.is_dir() || ft.is_file() || ft.is_symlink();
}
}
false
}
}
pub(crate) type DirectoryRenderer =
dyn Fn(&Directory, &HttpRequest) -> Result<ServiceResponse, io::Error>;
/// Returns percent encoded file URL path.
macro_rules! encode_file_url {
($path:ident) => {
utf8_percent_encode(&$path, CONTROLS)
};
}
/// Returns HTML entity encoded formatter.
///
/// ```plain
/// " => &quot;
/// & => &amp;
/// ' => &#x27;
/// < => &lt;
/// > => &gt;
/// / => &#x2f;
/// ```
macro_rules! encode_file_name {
($entry:ident) => {
escape_html_entity(&$entry.file_name().to_string_lossy(), Html)
};
}
pub(crate) fn directory_listing(
dir: &Directory,
req: &HttpRequest,
) -> Result<ServiceResponse, io::Error> {
let index_of = format!("Index of {}", req.path());
let mut body = String::new();
let base = Path::new(req.path());
for entry in dir.path.read_dir()? {
if dir.is_visible(&entry) {
let entry = entry.unwrap();
let p = match entry.path().strip_prefix(&dir.path) {
Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace("\\", "/"),
Ok(p) => base.join(p).to_string_lossy().into_owned(),
Err(_) => continue,
};
// if file is a directory, add '/' to the end of the name
if let Ok(metadata) = entry.metadata() {
if metadata.is_dir() {
let _ = write!(
body,
"<li><a href=\"{}\">{}/</a></li>",
encode_file_url!(p),
encode_file_name!(entry),
);
} else {
let _ = write!(
body,
"<li><a href=\"{}\">{}</a></li>",
encode_file_url!(p),
encode_file_name!(entry),
);
}
} else {
continue;
}
}
}
let html = format!(
"<html>\
<head><title>{}</title></head>\
<body><h1>{}</h1>\
<ul>\
{}\
</ul></body>\n</html>",
index_of, index_of, body
);
Ok(ServiceResponse::new(
req.clone(),
HttpResponse::Ok()
.content_type("text/html; charset=utf-8")
.body(html),
))
}
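The default listing above can be swapped out through `Files::files_listing_renderer`. A hedged sketch of a custom plain-text renderer (mount path and directory are illustrative; the closure signature mirrors `DirectoryRenderer`):

```rust
use actix_files::{Directory, Files};
use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse};

fn plain_text_listing() -> Files {
    // list visible entries as plain text instead of the default HTML page
    Files::new("/static", "./static")
        .show_files_listing()
        .files_listing_renderer(|dir: &Directory, req: &HttpRequest| {
            let mut body = String::new();
            for entry in dir.path.read_dir()? {
                if let Ok(entry) = entry {
                    body.push_str(&entry.file_name().to_string_lossy());
                    body.push('\n');
                }
            }
            Ok(ServiceResponse::new(
                req.clone(),
                HttpResponse::Ok()
                    .content_type("text/plain; charset=utf-8")
                    .body(body),
            ))
        })
}
```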

View File

@ -0,0 +1,52 @@
use mime::Mime;
/// Transforms MIME `text/*` types into their UTF-8 equivalent, if supported.
///
/// MIME types that are converted
/// - application/javascript
/// - text/html
/// - text/css
/// - text/plain
/// - text/csv
/// - text/tab-separated-values
pub(crate) fn equiv_utf8_text(ct: Mime) -> Mime {
// use (roughly) order of file-type popularity for a web server
if ct == mime::APPLICATION_JAVASCRIPT {
return mime::APPLICATION_JAVASCRIPT_UTF_8;
}
if ct == mime::TEXT_HTML {
return mime::TEXT_HTML_UTF_8;
}
if ct == mime::TEXT_CSS {
return mime::TEXT_CSS_UTF_8;
}
if ct == mime::TEXT_PLAIN {
return mime::TEXT_PLAIN_UTF_8;
}
if ct == mime::TEXT_CSV {
return mime::TEXT_CSV_UTF_8;
}
if ct == mime::TEXT_TAB_SEPARATED_VALUES {
return mime::TEXT_TAB_SEPARATED_VALUES_UTF_8;
}
ct
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_equiv_utf8_text() {
assert_eq!(equiv_utf8_text(mime::TEXT_PLAIN), mime::TEXT_PLAIN_UTF_8);
assert_eq!(equiv_utf8_text(mime::TEXT_XML), mime::TEXT_XML);
assert_eq!(equiv_utf8_text(mime::IMAGE_PNG), mime::IMAGE_PNG);
}
}
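This mapping is applied when the `prefer_utf8` flag is enabled on `Files` or `NamedFile`. A small sketch of toggling it through the public API (the directory is illustrative):

```rust
use actix_files::Files;

// `text/plain` responses will carry an explicit `; charset=utf-8` parameter
fn utf8_files() -> Files {
    Files::new("/", "./static").prefer_utf8(true)
}
```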

View File

@ -1,4 +1,4 @@
use actix_web::{http::StatusCode, HttpResponse, ResponseError};
use actix_web::{http::StatusCode, ResponseError};
use derive_more::Display;
/// Errors which can occur when serving static files.
@ -16,22 +16,30 @@ pub enum FilesError {
/// Return `NotFound` for `FilesError`
impl ResponseError for FilesError {
fn error_response(&self) -> HttpResponse {
HttpResponse::new(StatusCode::NOT_FOUND)
fn status_code(&self) -> StatusCode {
StatusCode::NOT_FOUND
}
}
#[allow(clippy::enum_variant_names)]
#[derive(Display, Debug, PartialEq)]
#[non_exhaustive]
pub enum UriSegmentError {
/// The segment started with the wrapped invalid character.
#[display(fmt = "The segment started with the wrapped invalid character")]
BadStart(char),
/// The segment contained the wrapped invalid character.
#[display(fmt = "The segment contained the wrapped invalid character")]
BadChar(char),
/// The segment ended with the wrapped invalid character.
#[display(fmt = "The segment ended with the wrapped invalid character")]
BadEnd(char),
/// The path is not a valid UTF-8 string after doing percent decoding.
#[display(fmt = "The path is not a valid UTF-8 string after percent-decoding")]
NotValidUtf8,
}
/// Return `BadRequest` for `UriSegmentError`

392
actix-files/src/files.rs Normal file
View File

@ -0,0 +1,392 @@
use std::{
cell::RefCell,
fmt, io,
path::{Path, PathBuf},
rc::Rc,
};
use actix_service::{boxed, IntoServiceFactory, ServiceFactory, ServiceFactoryExt};
use actix_web::{
dev::{
AppService, HttpServiceFactory, RequestHead, ResourceDef, ServiceRequest,
ServiceResponse,
},
error::Error,
guard::Guard,
http::header::DispositionType,
HttpRequest,
};
use futures_core::future::LocalBoxFuture;
use crate::{
directory_listing, named,
service::{FilesService, FilesServiceInner},
Directory, DirectoryRenderer, HttpNewService, MimeOverride, PathFilter,
};
/// Static files handling service.
///
/// `Files` service must be registered with `App::service()` method.
///
/// # Examples
/// ```
/// use actix_web::App;
/// use actix_files::Files;
///
/// let app = App::new()
/// .service(Files::new("/static", "."));
/// ```
pub struct Files {
path: String,
directory: PathBuf,
index: Option<String>,
show_index: bool,
redirect_to_slash: bool,
default: Rc<RefCell<Option<Rc<HttpNewService>>>>,
renderer: Rc<DirectoryRenderer>,
mime_override: Option<Rc<MimeOverride>>,
path_filter: Option<Rc<PathFilter>>,
file_flags: named::Flags,
use_guards: Option<Rc<dyn Guard>>,
guards: Vec<Rc<dyn Guard>>,
hidden_files: bool,
}
impl fmt::Debug for Files {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Files")
}
}
impl Clone for Files {
fn clone(&self) -> Self {
Self {
directory: self.directory.clone(),
index: self.index.clone(),
show_index: self.show_index,
redirect_to_slash: self.redirect_to_slash,
default: self.default.clone(),
renderer: self.renderer.clone(),
file_flags: self.file_flags,
path: self.path.clone(),
mime_override: self.mime_override.clone(),
path_filter: self.path_filter.clone(),
use_guards: self.use_guards.clone(),
guards: self.guards.clone(),
hidden_files: self.hidden_files,
}
}
}
impl Files {
/// Create new `Files` instance for a specified base directory.
///
/// # Argument Order
/// The first argument (`mount_path`) is the root URL at which the static files are served.
/// For example, `/assets` will serve files at `example.com/assets/...`.
///
/// The second argument (`serve_from`) is the location on disk at which files are loaded.
/// This can be a relative path. For example, `./` would serve files from the current
/// working directory.
///
/// # Implementation Notes
/// If the mount path is set as the root path `/`, services registered after this one will
/// be inaccessible. Register more specific handlers and services first.
///
/// `Files` utilizes the existing Tokio thread-pool for blocking filesystem operations.
/// The number of running threads is adjusted over time as needed, up to a maximum of 512 times
/// the number of server [workers](actix_web::HttpServer::workers), by default.
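///
/// # Examples
/// A sketch of the registration order described above (the `/api/health` route is
/// illustrative):
/// ```
/// use actix_files::Files;
/// use actix_web::{web, App, HttpResponse};
///
/// let app = App::new()
///     // more specific routes are registered first, so they remain reachable
///     .route("/api/health", web::get().to(|| async { HttpResponse::Ok().finish() }))
///     // the root-mounted file service matches everything else
///     .service(Files::new("/", "./static"));
/// ```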
pub fn new<T: Into<PathBuf>>(mount_path: &str, serve_from: T) -> Files {
let orig_dir = serve_from.into();
let dir = match orig_dir.canonicalize() {
Ok(canon_dir) => canon_dir,
Err(_) => {
log::error!("Specified path is not a directory: {:?}", orig_dir);
PathBuf::new()
}
};
Files {
path: mount_path.trim_end_matches('/').to_owned(),
directory: dir,
index: None,
show_index: false,
redirect_to_slash: false,
default: Rc::new(RefCell::new(None)),
renderer: Rc::new(directory_listing),
mime_override: None,
path_filter: None,
file_flags: named::Flags::default(),
use_guards: None,
guards: Vec::new(),
hidden_files: false,
}
}
/// Show files listing for directories.
///
/// By default, the files listing is disabled.
///
/// When used with [`Files::index_file()`], files listing is shown as a fallback
/// when the index file is not found.
pub fn show_files_listing(mut self) -> Self {
self.show_index = true;
self
}
/// Redirects to a slash-ended path when browsing a directory.
///
/// By default, no redirect is performed.
pub fn redirect_to_slash_directory(mut self) -> Self {
self.redirect_to_slash = true;
self
}
/// Sets a custom directory renderer.
pub fn files_listing_renderer<F>(mut self, f: F) -> Self
where
for<'r, 's> F:
Fn(&'r Directory, &'s HttpRequest) -> Result<ServiceResponse, io::Error> + 'static,
{
self.renderer = Rc::new(f);
self
}
/// Specifies the MIME override callback.
pub fn mime_override<F>(mut self, f: F) -> Self
where
F: Fn(&mime::Name<'_>) -> DispositionType + 'static,
{
self.mime_override = Some(Rc::new(f));
self
}
/// Sets the path filtering closure.
///
/// The path provided to the closure is relative to the `serve_from` path. You can safely join
/// this path with the `serve_from` path to get the real path. However, the real path may not
/// exist, since the filter is called before checking path existence.
///
/// When a path doesn't pass the filter, [`Files::default_handler`] is called if set, otherwise,
/// `404 Not Found` is returned.
///
/// # Examples
/// ```
/// use std::path::Path;
/// use actix_files::Files;
///
/// // prevent searching subdirectories and following symlinks
/// let files_service = Files::new("/", "./static").path_filter(|path, _| {
/// path.components().count() == 1
/// && Path::new("./static")
/// .join(path)
/// .symlink_metadata()
/// .map(|m| !m.file_type().is_symlink())
/// .unwrap_or(false)
/// });
/// ```
pub fn path_filter<F>(mut self, f: F) -> Self
where
F: Fn(&Path, &RequestHead) -> bool + 'static,
{
self.path_filter = Some(Rc::new(f));
self
}
/// Sets the index file.
///
/// Shows a specific index file for directory requests instead of showing the files listing.
///
/// If the index file is not found, files listing is shown as a fallback if
/// [`Files::show_files_listing()`] is set.
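///
/// # Examples
/// ```
/// use actix_files::Files;
///
/// // requests for a directory serve `<dir>/index.html` when it exists
/// let files = Files::new("/", "./static").index_file("index.html");
/// ```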
pub fn index_file<T: Into<String>>(mut self, index: T) -> Self {
self.index = Some(index.into());
self
}
/// Specifies whether to use ETag or not.
///
/// Default is true.
pub fn use_etag(mut self, value: bool) -> Self {
self.file_flags.set(named::Flags::ETAG, value);
self
}
/// Specifies whether to use Last-Modified or not.
///
/// Default is true.
pub fn use_last_modified(mut self, value: bool) -> Self {
self.file_flags.set(named::Flags::LAST_MD, value);
self
}
/// Specifies whether text responses should signal a UTF-8 encoding.
///
/// Default is true.
pub fn prefer_utf8(mut self, value: bool) -> Self {
self.file_flags.set(named::Flags::PREFER_UTF8, value);
self
}
/// Adds a routing guard.
///
/// Use this to allow multiple chained file services that respond to strictly different
/// properties of a request. Due to the way routing works, if a guard check returns true and the
/// request starts being handled by the file service, it will not be able to back out and try
/// the next service; you will simply get a 404 (or 405) error response.
///
/// To allow `POST` requests to retrieve files, see [`Files::use_guards`].
///
/// # Examples
/// ```
/// use actix_web::{guard::Header, App};
/// use actix_files::Files;
///
/// App::new().service(
/// Files::new("/","/my/site/files")
/// .guard(Header("Host", "example.com"))
/// );
/// ```
pub fn guard<G: Guard + 'static>(mut self, guard: G) -> Self {
self.guards.push(Rc::new(guard));
self
}
/// Specifies guard to check before fetching directory listings or files.
///
/// Note that this guard has no effect on routing; its main use is to guard on the request's
/// method just before serving the file, only allowing `GET` and `HEAD` requests by default.
/// See [`Files::guard`] for routing guards.
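///
/// # Examples
/// A sketch that also allows `POST` requests to fetch files:
/// ```
/// use actix_files::Files;
/// use actix_web::guard::{Any, Get, Head, Post};
///
/// let files = Files::new("/", "./static")
///     .method_guard(Any(Get()).or(Head()).or(Post()));
/// ```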
pub fn method_guard<G: Guard + 'static>(mut self, guard: G) -> Self {
self.use_guards = Some(Rc::new(guard));
self
}
/// See [`Files::method_guard`].
#[doc(hidden)]
#[deprecated(since = "0.6.0", note = "Renamed to `method_guard`.")]
pub fn use_guards<G: Guard + 'static>(self, guard: G) -> Self {
self.method_guard(guard)
}
/// Disables the `Content-Disposition` header.
///
/// By default, the `Content-Disposition` header is enabled.
pub fn disable_content_disposition(mut self) -> Self {
self.file_flags.remove(named::Flags::CONTENT_DISPOSITION);
self
}
/// Sets the default handler, which is used when no matching file can be found.
///
/// # Examples
/// Setting a fallback static file handler:
/// ```
/// use actix_files::{Files, NamedFile};
/// use actix_web::dev::{ServiceRequest, ServiceResponse, fn_service};
///
/// # fn run() -> Result<(), actix_web::Error> {
/// let files = Files::new("/", "./static")
/// .index_file("index.html")
/// .default_handler(fn_service(|req: ServiceRequest| async {
/// let (req, _) = req.into_parts();
/// let file = NamedFile::open_async("./static/404.html").await?;
/// let res = file.into_response(&req);
/// Ok(ServiceResponse::new(req, res))
/// }));
/// # Ok(())
/// # }
/// ```
pub fn default_handler<F, U>(mut self, f: F) -> Self
where
F: IntoServiceFactory<U, ServiceRequest>,
U: ServiceFactory<
ServiceRequest,
Config = (),
Response = ServiceResponse,
Error = Error,
> + 'static,
{
// create and configure default resource
self.default = Rc::new(RefCell::new(Some(Rc::new(boxed::factory(
f.into_factory().map_init_err(|_| ()),
)))));
self
}
/// Enables serving hidden files and directories, allowing leading dots in URL path segments.
pub fn use_hidden_files(mut self) -> Self {
self.hidden_files = true;
self
}
}
impl HttpServiceFactory for Files {
fn register(mut self, config: &mut AppService) {
let guards = if self.guards.is_empty() {
None
} else {
let guards = std::mem::take(&mut self.guards);
Some(
guards
.into_iter()
.map(|guard| -> Box<dyn Guard> { Box::new(guard) })
.collect::<Vec<_>>(),
)
};
if self.default.borrow().is_none() {
*self.default.borrow_mut() = Some(config.default_service());
}
let rdef = if config.is_root() {
ResourceDef::root_prefix(&self.path)
} else {
ResourceDef::prefix(&self.path)
};
config.register_service(rdef, guards, self, None)
}
}
impl ServiceFactory<ServiceRequest> for Files {
type Response = ServiceResponse;
type Error = Error;
type Config = ();
type Service = FilesService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
let mut inner = FilesServiceInner {
directory: self.directory.clone(),
index: self.index.clone(),
show_index: self.show_index,
redirect_to_slash: self.redirect_to_slash,
default: None,
renderer: self.renderer.clone(),
mime_override: self.mime_override.clone(),
path_filter: self.path_filter.clone(),
file_flags: self.file_flags,
guards: self.use_guards.clone(),
hidden_files: self.hidden_files,
};
if let Some(ref default) = *self.default.borrow() {
let fut = default.new_service(());
Box::pin(async {
match fut.await {
Ok(default) => {
inner.default = Some(default);
Ok(FilesService(Rc::new(inner)))
}
Err(_) => Err(()),
}
})
} else {
Box::pin(async move { Ok(FilesService(Rc::new(inner))) })
}
}
}

File diff suppressed because it is too large

View File

@ -1,46 +1,77 @@
use std::fs::{File, Metadata};
use std::io;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
#[cfg(unix)]
use std::os::unix::fs::MetadataExt;
use std::{
fs::Metadata,
io,
path::{Path, PathBuf},
time::{SystemTime, UNIX_EPOCH},
};
use actix_web::{
body::{self, BoxBody, SizedStream},
dev::{
self, AppService, HttpServiceFactory, ResourceDef, Service, ServiceFactory,
ServiceRequest, ServiceResponse,
},
http::{
header::{
self, Charset, ContentDisposition, ContentEncoding, DispositionParam,
DispositionType, ExtendedValue, HeaderValue,
},
StatusCode,
},
Error, HttpMessage, HttpRequest, HttpResponse, Responder,
};
use bitflags::bitflags;
use derive_more::{Deref, DerefMut};
use futures_core::future::LocalBoxFuture;
use mime_guess::from_path;
use actix_http::body::SizedStream;
use actix_web::dev::BodyEncoding;
use actix_web::http::header::{
self, Charset, ContentDisposition, DispositionParam, DispositionType, ExtendedValue,
};
use actix_web::http::{ContentEncoding, StatusCode};
use actix_web::{Error, HttpMessage, HttpRequest, HttpResponse, Responder};
use futures_util::future::{ready, Ready};
use crate::range::HttpRange;
use crate::ChunkedReadFile;
use crate::{encoding::equiv_utf8_text, range::HttpRange};
bitflags! {
pub(crate) struct Flags: u8 {
const ETAG = 0b0000_0001;
const LAST_MD = 0b0000_0010;
const ETAG = 0b0000_0001;
const LAST_MD = 0b0000_0010;
const CONTENT_DISPOSITION = 0b0000_0100;
const PREFER_UTF8 = 0b0000_1000;
}
}
impl Default for Flags {
fn default() -> Self {
Flags::all()
Flags::from_bits_truncate(0b0000_1111)
}
}
/// A file with an associated name.
#[derive(Debug)]
///
/// `NamedFile` can be registered as services:
/// ```
/// use actix_web::App;
/// use actix_files::NamedFile;
///
/// # async fn run() -> Result<(), Box<dyn std::error::Error>> {
/// let file = NamedFile::open_async("./static/index.html").await?;
/// let app = App::new().service(file);
/// # Ok(())
/// # }
/// ```
///
/// They can also be returned from handlers:
/// ```
/// use actix_web::{Responder, get};
/// use actix_files::NamedFile;
///
/// #[get("/")]
/// async fn index() -> impl Responder {
/// NamedFile::open_async("./static/index.html").await
/// }
/// ```
#[derive(Debug, Deref, DerefMut)]
pub struct NamedFile {
path: PathBuf,
#[deref]
#[deref_mut]
file: File,
path: PathBuf,
modified: Option<SystemTime>,
pub(crate) md: Metadata,
pub(crate) flags: Flags,
@ -50,6 +81,13 @@ pub struct NamedFile {
pub(crate) encoding: Option<ContentEncoding>,
}
#[cfg(not(feature = "experimental-io-uring"))]
pub(crate) use std::fs::File;
#[cfg(feature = "experimental-io-uring")]
pub(crate) use tokio_uring::fs::File;
use super::chunked;
impl NamedFile {
/// Creates an instance from a previously opened file.
///
@ -57,8 +95,7 @@ impl NamedFile {
/// `ContentDisposition` headers.
///
/// # Examples
///
/// ```rust
/// ```ignore
/// use actix_files::NamedFile;
/// use std::io::{self, Write};
/// use std::env;
@ -89,12 +126,20 @@ impl NamedFile {
};
let ct = from_path(&path).first_or_octet_stream();
let disposition = match ct.type_() {
mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline,
mime::APPLICATION => match ct.subtype() {
mime::JAVASCRIPT | mime::JSON => DispositionType::Inline,
name if name == "wasm" => DispositionType::Inline,
_ => DispositionType::Attachment,
},
_ => DispositionType::Attachment,
};
let mut parameters =
vec![DispositionParam::Filename(String::from(filename.as_ref()))];
if !filename.is_ascii() {
parameters.push(DispositionParam::FilenameExt(ExtendedValue {
charset: Charset::Ext(String::from("UTF-8")),
@ -102,16 +147,42 @@ impl NamedFile {
value: filename.into_owned().into_bytes(),
}))
}
let cd = ContentDisposition {
disposition,
parameters,
};
(ct, cd)
};
let md = file.metadata()?;
let md = {
#[cfg(not(feature = "experimental-io-uring"))]
{
file.metadata()?
}
#[cfg(feature = "experimental-io-uring")]
{
use std::os::unix::prelude::{AsRawFd, FromRawFd};
let fd = file.as_raw_fd();
// SAFETY: fd is borrowed and lives longer than the unsafe block
unsafe {
let file = std::fs::File::from_raw_fd(fd);
let md = file.metadata();
// SAFETY: forget the fd before exiting block in success or error case but don't
// run destructor (that would close file handle)
std::mem::forget(file);
md?
}
}
};
let modified = md.modified().ok();
let encoding = None;
Ok(NamedFile {
path,
file,
@ -128,14 +199,42 @@ impl NamedFile {
/// Attempts to open a file in read-only mode.
///
/// # Examples
///
/// ```rust
/// ```
/// use actix_files::NamedFile;
///
/// let file = NamedFile::open("foo.txt");
/// ```
#[cfg(not(feature = "experimental-io-uring"))]
pub fn open<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
Self::from_file(File::open(&path)?, path)
let file = File::open(&path)?;
Self::from_file(file, path)
}
/// Attempts to open a file asynchronously in read-only mode.
///
/// When the `experimental-io-uring` crate feature is enabled, this will be async.
/// Otherwise, it will be just like [`open`][Self::open].
///
/// # Examples
/// ```
/// use actix_files::NamedFile;
/// # async fn open() {
/// let file = NamedFile::open_async("foo.txt").await.unwrap();
/// # }
/// ```
pub async fn open_async<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
let file = {
#[cfg(not(feature = "experimental-io-uring"))]
{
File::open(&path)?
}
#[cfg(feature = "experimental-io-uring")]
{
File::open(&path).await?
}
};
Self::from_file(file, path)
}
/// Returns reference to the underlying `File` object.
@ -147,13 +246,12 @@ impl NamedFile {
/// Retrieve the path of this file.
///
/// # Examples
///
/// ```rust
/// ```
/// # use std::io;
/// use actix_files::NamedFile;
///
/// # fn path() -> io::Result<()> {
/// let file = NamedFile::open("test.txt")?;
/// # async fn path() -> io::Result<()> {
/// let file = NamedFile::open_async("test.txt").await?;
/// assert_eq!(file.path().as_os_str(), "test.txt");
/// # Ok(())
/// # }
@ -169,21 +267,21 @@ impl NamedFile {
self
}
/// Set the MIME Content-Type for serving this file. By default
/// the Content-Type is inferred from the filename extension.
/// Set the MIME Content-Type for serving this file. By default the Content-Type is inferred
/// from the filename extension.
#[inline]
pub fn set_content_type(mut self, mime_type: mime::Mime) -> Self {
self.content_type = mime_type;
self
}
/// Set the Content-Disposition for serving this file. This allows
/// changing the inline/attachment disposition as well as the filename
/// sent to the peer. By default the disposition is `inline` for text,
/// image, and video content types, and `attachment` otherwise, and
/// the filename is taken from the path provided in the `open` method
/// after converting it to UTF-8 using.
/// [to_string_lossy](https://doc.rust-lang.org/std/ffi/struct.OsStr.html#method.to_string_lossy).
/// Set the Content-Disposition for serving this file. This allows changing the
/// `inline/attachment` disposition as well as the filename sent to the peer.
///
/// By default the disposition is `inline` for `text/*`, `image/*`, `video/*` and
/// `application/{javascript, json, wasm}` mime types, and `attachment` otherwise, and the
/// filename is taken from the path provided in the `open` method after converting it to UTF-8
/// (using `to_string_lossy`).
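///
/// # Examples
/// A sketch forcing an attachment disposition with an explicit filename (the path is
/// illustrative):
/// ```
/// use actix_files::NamedFile;
/// use actix_web::http::header::{ContentDisposition, DispositionParam, DispositionType};
///
/// # async fn docs() -> std::io::Result<()> {
/// let file = NamedFile::open_async("./static/report.csv").await?.set_content_disposition(
///     ContentDisposition {
///         disposition: DispositionType::Attachment,
///         parameters: vec![DispositionParam::Filename("report.csv".to_owned())],
///     },
/// );
/// # Ok(())
/// # }
/// ```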
#[inline]
pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self {
self.content_disposition = cd;
@ -201,38 +299,53 @@ impl NamedFile {
}
/// Set content encoding for serving this file
///
/// Must be used with [`actix_web::middleware::Compress`] to take effect.
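///
/// # Examples
/// A sketch of setting an explicit encoding (the path is illustrative):
/// ```
/// use actix_files::NamedFile;
/// use actix_web::http::header::ContentEncoding;
///
/// # async fn docs() -> std::io::Result<()> {
/// // mark the response as identity-encoded so compression middleware leaves it unchanged
/// let file = NamedFile::open_async("./static/video.mp4")
///     .await?
///     .set_content_encoding(ContentEncoding::Identity);
/// # Ok(())
/// # }
/// ```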
#[inline]
pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self {
self.encoding = Some(enc);
self
}
#[inline]
///Specifies whether to use ETag or not.
/// Specifies whether to return `ETag` header in response.
///
///Default is true.
/// Default is true.
#[inline]
pub fn use_etag(mut self, value: bool) -> Self {
self.flags.set(Flags::ETAG, value);
self
}
#[inline]
///Specifies whether to use Last-Modified or not.
/// Specifies whether to return `Last-Modified` header in response.
///
///Default is true.
/// Default is true.
#[inline]
pub fn use_last_modified(mut self, value: bool) -> Self {
self.flags.set(Flags::LAST_MD, value);
self
}
/// Specifies whether text responses should signal a UTF-8 encoding.
///
/// Default is true.
#[inline]
pub fn prefer_utf8(mut self, value: bool) -> Self {
self.flags.set(Flags::PREFER_UTF8, value);
self
}
/// Creates an `ETag` in a format similar to Apache's.
pub(crate) fn etag(&self) -> Option<header::EntityTag> {
// This etag format is similar to Apache's.
self.modified.as_ref().map(|mtime| {
let ino = {
#[cfg(unix)]
{
#[cfg(unix)]
use std::os::unix::fs::MetadataExt as _;
self.md.ino()
}
#[cfg(not(unix))]
{
0
@ -242,7 +355,8 @@ impl NamedFile {
let dur = mtime
.duration_since(UNIX_EPOCH)
.expect("modification time must be after epoch");
header::EntityTag::strong(format!(
header::EntityTag::new_strong(format!(
"{:x}:{:x}:{:x}:{:x}",
ino,
self.md.len(),
@ -256,27 +370,33 @@ impl NamedFile {
self.modified.map(|mtime| mtime.into())
}
pub fn into_response(self, req: &HttpRequest) -> Result<HttpResponse, Error> {
/// Creates an `HttpResponse` with file as a streaming body.
pub fn into_response(self, req: &HttpRequest) -> HttpResponse<BoxBody> {
if self.status_code != StatusCode::OK {
let mut resp = HttpResponse::build(self.status_code);
resp.set(header::ContentType(self.content_type.clone()))
.if_true(self.flags.contains(Flags::CONTENT_DISPOSITION), |res| {
res.header(
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
);
});
if let Some(current_encoding) = self.encoding {
resp.encoding(current_encoding);
}
let reader = ChunkedReadFile {
size: self.md.len(),
offset: 0,
file: Some(self.file),
fut: None,
counter: 0,
let mut res = HttpResponse::build(self.status_code);
let ct = if self.flags.contains(Flags::PREFER_UTF8) {
equiv_utf8_text(self.content_type.clone())
} else {
self.content_type
};
return Ok(resp.streaming(reader));
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
res.insert_header((
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
));
}
if let Some(current_encoding) = self.encoding {
res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str()));
}
let reader = chunked::new_chunked_read(self.md.len(), 0, self.file);
return res.streaming(reader);
}
let etag = if self.flags.contains(Flags::ETAG) {
@ -284,6 +404,7 @@ impl NamedFile {
} else {
None
};
let last_modified = if self.flags.contains(Flags::LAST_MD) {
self.last_modified()
} else {
@ -296,10 +417,11 @@ impl NamedFile {
} else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) =
(last_modified, req.get_header())
{
let t1: SystemTime = m.clone().into();
let t2: SystemTime = since.clone().into();
let t1: SystemTime = (*m).into();
let t2: SystemTime = (*since).into();
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
(Ok(t1), Ok(t2)) => t1 > t2,
(Ok(t1), Ok(t2)) => t1.as_secs() > t2.as_secs(),
_ => false,
}
} else {
@ -309,104 +431,98 @@ impl NamedFile {
// check last modified
let not_modified = if !none_match(etag.as_ref(), req) {
true
} else if req.headers().contains_key(&header::IF_NONE_MATCH) {
} else if req.headers().contains_key(header::IF_NONE_MATCH) {
false
} else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) =
(last_modified, req.get_header())
{
let t1: SystemTime = m.clone().into();
let t2: SystemTime = since.clone().into();
let t1: SystemTime = (*m).into();
let t2: SystemTime = (*since).into();
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
(Ok(t1), Ok(t2)) => t1 <= t2,
(Ok(t1), Ok(t2)) => t1.as_secs() <= t2.as_secs(),
_ => false,
}
} else {
false
};
let mut resp = HttpResponse::build(self.status_code);
resp.set(header::ContentType(self.content_type.clone()))
.if_true(self.flags.contains(Flags::CONTENT_DISPOSITION), |res| {
res.header(
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
);
});
// default compressing
if let Some(current_encoding) = self.encoding {
resp.encoding(current_encoding);
let mut res = HttpResponse::build(self.status_code);
let ct = if self.flags.contains(Flags::PREFER_UTF8) {
equiv_utf8_text(self.content_type.clone())
} else {
self.content_type
};
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
res.insert_header((
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
));
}
resp.if_some(last_modified, |lm, resp| {
resp.set(header::LastModified(lm));
})
.if_some(etag, |etag, resp| {
resp.set(header::ETag(etag));
});
if let Some(current_encoding) = self.encoding {
res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str()));
}
resp.header(header::ACCEPT_RANGES, "bytes");
if let Some(lm) = last_modified {
res.insert_header((header::LAST_MODIFIED, lm.to_string()));
}
if let Some(etag) = etag {
res.insert_header((header::ETAG, etag.to_string()));
}
res.insert_header((header::ACCEPT_RANGES, "bytes"));
let mut length = self.md.len();
let mut offset = 0;
// check for range header
if let Some(ranges) = req.headers().get(&header::RANGE) {
if let Ok(rangesheader) = ranges.to_str() {
if let Ok(rangesvec) = HttpRange::parse(rangesheader, length) {
length = rangesvec[0].length;
offset = rangesvec[0].start;
resp.encoding(ContentEncoding::Identity);
resp.header(
if let Some(ranges) = req.headers().get(header::RANGE) {
if let Ok(ranges_header) = ranges.to_str() {
if let Ok(ranges) = HttpRange::parse(ranges_header, length) {
length = ranges[0].length;
offset = ranges[0].start;
// don't allow compression middleware to modify partial content
res.insert_header((
header::CONTENT_ENCODING,
HeaderValue::from_static("identity"),
));
res.insert_header((
header::CONTENT_RANGE,
format!(
"bytes {}-{}/{}",
offset,
offset + length - 1,
self.md.len()
),
);
format!("bytes {}-{}/{}", offset, offset + length - 1, self.md.len()),
));
} else {
resp.header(header::CONTENT_RANGE, format!("bytes */{}", length));
return Ok(resp.status(StatusCode::RANGE_NOT_SATISFIABLE).finish());
res.insert_header((header::CONTENT_RANGE, format!("bytes */{}", length)));
return res.status(StatusCode::RANGE_NOT_SATISFIABLE).finish();
};
} else {
return Ok(resp.status(StatusCode::BAD_REQUEST).finish());
return res.status(StatusCode::BAD_REQUEST).finish();
};
};
if precondition_failed {
return Ok(resp.status(StatusCode::PRECONDITION_FAILED).finish());
return res.status(StatusCode::PRECONDITION_FAILED).finish();
} else if not_modified {
return Ok(resp.status(StatusCode::NOT_MODIFIED).finish());
return res
.status(StatusCode::NOT_MODIFIED)
.body(body::None::new())
.map_into_boxed_body();
}
let reader = ChunkedReadFile {
offset,
size: length,
file: Some(self.file),
fut: None,
counter: 0,
};
let reader = chunked::new_chunked_read(length, offset, self.file);
if offset != 0 || length != self.md.len() {
resp.status(StatusCode::PARTIAL_CONTENT);
res.status(StatusCode::PARTIAL_CONTENT);
}
Ok(resp.body(SizedStream::new(length, reader)))
}
}
impl Deref for NamedFile {
type Target = File;
fn deref(&self) -> &File {
&self.file
}
}
impl DerefMut for NamedFile {
fn deref_mut(&mut self) -> &mut File {
&mut self.file
res.body(SizedStream::new(length, reader))
}
}
@ -414,6 +530,7 @@ impl DerefMut for NamedFile {
fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
match req.get_header::<header::IfMatch>() {
None | Some(header::IfMatch::Any) => true,
Some(header::IfMatch::Items(ref items)) => {
if let Some(some_etag) = etag {
for item in items {
@ -422,6 +539,7 @@ fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
}
}
}
false
}
}
@ -431,6 +549,7 @@ fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
match req.get_header::<header::IfNoneMatch>() {
Some(header::IfNoneMatch::Any) => false,
Some(header::IfNoneMatch::Items(ref items)) => {
if let Some(some_etag) = etag {
for item in items {
@ -439,17 +558,71 @@ fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
}
}
}
true
}
None => true,
}
}
impl Responder for NamedFile {
type Error = Error;
type Future = Ready<Result<HttpResponse, Error>>;
type Body = BoxBody;
fn respond_to(self, req: &HttpRequest) -> Self::Future {
ready(self.into_response(req))
fn respond_to(self, req: &HttpRequest) -> HttpResponse<Self::Body> {
self.into_response(req)
}
}
impl ServiceFactory<ServiceRequest> for NamedFile {
type Response = ServiceResponse;
type Error = Error;
type Config = ();
type Service = NamedFileService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
let service = NamedFileService {
path: self.path.clone(),
};
Box::pin(async move { Ok(service) })
}
}
#[doc(hidden)]
#[derive(Debug)]
pub struct NamedFileService {
path: PathBuf,
}
impl Service<ServiceRequest> for NamedFileService {
type Response = ServiceResponse;
type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
dev::always_ready!();
fn call(&self, req: ServiceRequest) -> Self::Future {
let (req, _) = req.into_parts();
let path = self.path.clone();
Box::pin(async move {
let file = NamedFile::open_async(path).await?;
let res = file.into_response(&req);
Ok(ServiceResponse::new(req, res))
})
}
}
impl HttpServiceFactory for NamedFile {
fn register(self, config: &mut AppService) {
config.register_service(
ResourceDef::root_prefix(self.path.to_string_lossy().as_ref()),
None,
self,
None,
)
}
}

162
actix-files/src/path_buf.rs Normal file
View File

@ -0,0 +1,162 @@
use std::{
path::{Component, Path, PathBuf},
str::FromStr,
};
use actix_utils::future::{ready, Ready};
use actix_web::{dev::Payload, FromRequest, HttpRequest};
use crate::error::UriSegmentError;
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct PathBufWrap(PathBuf);
impl FromStr for PathBufWrap {
type Err = UriSegmentError;
fn from_str(path: &str) -> Result<Self, Self::Err> {
Self::parse_path(path, false)
}
}
impl PathBufWrap {
/// Parse a path, giving the choice of allowing hidden files to be considered valid segments.
///
/// Path traversal is guarded by this method.
pub fn parse_path(path: &str, hidden_files: bool) -> Result<Self, UriSegmentError> {
let mut buf = PathBuf::new();
// equivalent to `path.split('/').count()`
let mut segment_count = path.matches('/').count() + 1;
// we can decode the whole path here (instead of per-segment decoding)
// because we will reject `%2F` in paths using `segement_count`.
let path = percent_encoding::percent_decode_str(path)
.decode_utf8()
.map_err(|_| UriSegmentError::NotValidUtf8)?;
// disallow decoding `%2F` into `/`
if segment_count != path.matches('/').count() + 1 {
return Err(UriSegmentError::BadChar('/'));
}
for segment in path.split('/') {
if segment == ".." {
segment_count -= 1;
buf.pop();
} else if !hidden_files && segment.starts_with('.') {
return Err(UriSegmentError::BadStart('.'));
} else if segment.starts_with('*') {
return Err(UriSegmentError::BadStart('*'));
} else if segment.ends_with(':') {
return Err(UriSegmentError::BadEnd(':'));
} else if segment.ends_with('>') {
return Err(UriSegmentError::BadEnd('>'));
} else if segment.ends_with('<') {
return Err(UriSegmentError::BadEnd('<'));
} else if segment.is_empty() {
segment_count -= 1;
continue;
} else if cfg!(windows) && segment.contains('\\') {
return Err(UriSegmentError::BadChar('\\'));
} else {
buf.push(segment)
}
}
// make sure we agree with stdlib parser
for (i, component) in buf.components().enumerate() {
assert!(matches!(component, Component::Normal(_)));
assert!(i < segment_count);
}
Ok(PathBufWrap(buf))
}
}
impl AsRef<Path> for PathBufWrap {
fn as_ref(&self) -> &Path {
self.0.as_ref()
}
}
impl FromRequest for PathBufWrap {
type Error = UriSegmentError;
type Future = Ready<Result<Self, Self::Error>>;
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
ready(req.match_info().unprocessed().parse())
}
}
#[cfg(test)]
mod tests {
use std::iter::FromIterator;
use super::*;
#[test]
fn test_path_buf() {
assert_eq!(
PathBufWrap::from_str("/test/.tt").map(|t| t.0),
Err(UriSegmentError::BadStart('.'))
);
assert_eq!(
PathBufWrap::from_str("/test/*tt").map(|t| t.0),
Err(UriSegmentError::BadStart('*'))
);
assert_eq!(
PathBufWrap::from_str("/test/tt:").map(|t| t.0),
Err(UriSegmentError::BadEnd(':'))
);
assert_eq!(
PathBufWrap::from_str("/test/tt<").map(|t| t.0),
Err(UriSegmentError::BadEnd('<'))
);
assert_eq!(
PathBufWrap::from_str("/test/tt>").map(|t| t.0),
Err(UriSegmentError::BadEnd('>'))
);
assert_eq!(
PathBufWrap::from_str("/seg1/seg2/").unwrap().0,
PathBuf::from_iter(vec!["seg1", "seg2"])
);
assert_eq!(
PathBufWrap::from_str("/seg1/../seg2/").unwrap().0,
PathBuf::from_iter(vec!["seg2"])
);
}
#[test]
fn test_parse_path() {
assert_eq!(
PathBufWrap::parse_path("/test/.tt", false).map(|t| t.0),
Err(UriSegmentError::BadStart('.'))
);
assert_eq!(
PathBufWrap::parse_path("/test/.tt", true).unwrap().0,
PathBuf::from_iter(vec!["test", ".tt"])
);
}
#[test]
fn path_traversal() {
assert_eq!(
PathBufWrap::parse_path("/../README.md", false).unwrap().0,
PathBuf::from_iter(vec!["README.md"])
);
assert_eq!(
PathBufWrap::parse_path("/../README.md", true).unwrap().0,
PathBuf::from_iter(vec!["README.md"])
);
assert_eq!(
PathBufWrap::parse_path("/../../../../../../../../../../etc/passwd", false)
.unwrap()
.0,
PathBuf::from_iter(vec!["etc/passwd"])
);
}
}

View File

@ -1,95 +1,35 @@
use derive_more::{Display, Error};
/// HTTP Range header representation.
#[derive(Debug, Clone, Copy)]
pub struct HttpRange {
/// Start of range.
pub start: u64,
/// Length of range.
pub length: u64,
}
static PREFIX: &str = "bytes=";
const PREFIX_LEN: usize = 6;
#[derive(Debug, Clone, Display, Error)]
#[display(fmt = "Parse HTTP Range failed")]
pub struct ParseRangeErr(#[error(not(source))] ());
impl HttpRange {
/// Parses Range HTTP header string as per RFC 2616.
///
/// `header` is HTTP Range header (e.g. `bytes=0-9`).
/// `size` is full size of response (file).
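///
/// # Examples
/// A sketch of parsing a single byte range against a 100-byte body:
/// ```
/// use actix_files::HttpRange;
///
/// let ranges = HttpRange::parse("bytes=0-9", 100).unwrap();
/// assert_eq!(ranges[0].start, 0);
/// assert_eq!(ranges[0].length, 10);
/// ```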
pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ()> {
if header.is_empty() {
return Ok(Vec::new());
pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ParseRangeErr> {
match http_range::HttpRange::parse(header, size) {
Ok(ranges) => Ok(ranges
.iter()
.map(|range| HttpRange {
start: range.start,
length: range.length,
})
.collect()),
Err(_) => Err(ParseRangeErr(())),
}
if !header.starts_with(PREFIX) {
return Err(());
}
let size_sig = size as i64;
let mut no_overlap = false;
let all_ranges: Vec<Option<HttpRange>> = header[PREFIX_LEN..]
.split(',')
.map(|x| x.trim())
.filter(|x| !x.is_empty())
.map(|ra| {
let mut start_end_iter = ra.split('-');
let start_str = start_end_iter.next().ok_or(())?.trim();
let end_str = start_end_iter.next().ok_or(())?.trim();
if start_str.is_empty() {
// If no start is specified, end specifies the
// range start relative to the end of the file.
let mut length: i64 = end_str.parse().map_err(|_| ())?;
if length > size_sig {
length = size_sig;
}
Ok(Some(HttpRange {
start: (size_sig - length) as u64,
length: length as u64,
}))
} else {
let start: i64 = start_str.parse().map_err(|_| ())?;
if start < 0 {
return Err(());
}
if start >= size_sig {
no_overlap = true;
return Ok(None);
}
let length = if end_str.is_empty() {
// If no end is specified, range extends to end of the file.
size_sig - start
} else {
let mut end: i64 = end_str.parse().map_err(|_| ())?;
if start > end {
return Err(());
}
if end >= size_sig {
end = size_sig - 1;
}
end - start + 1
};
Ok(Some(HttpRange {
start: start as u64,
length: length as u64,
}))
}
})
.collect::<Result<_, _>>()?;
let ranges: Vec<HttpRange> = all_ranges.into_iter().filter_map(|x| x).collect();
if no_overlap && ranges.is_empty() {
return Err(());
}
Ok(ranges)
}
}
@ -330,8 +270,7 @@ mod tests {
if expected.is_empty() {
continue;
} else {
assert!(
false,
panic!(
"parse({}, {}) returned error {:?}",
header,
size,
@ -343,28 +282,24 @@ mod tests {
let got = res.unwrap();
if got.len() != expected.len() {
assert!(
false,
panic!(
"len(parseRange({}, {})) = {}, want {}",
header,
size,
got.len(),
expected.len()
);
continue;
}
for i in 0..expected.len() {
if got[i].start != expected[i].start {
assert!(
false,
panic!(
"parseRange({}, {})[{}].start = {}, want {}",
header, size, i, got[i].start, expected[i].start
)
}
if got[i].length != expected[i].length {
assert!(
false,
panic!(
"parseRange({}, {})[{}].length = {}, want {}",
header, size, i, got[i].length, expected[i].length
)

195
actix-files/src/service.rs Normal file
View File

@ -0,0 +1,195 @@
use std::{fmt, io, ops::Deref, path::PathBuf, rc::Rc};
use actix_web::{
body::BoxBody,
dev::{self, Service, ServiceRequest, ServiceResponse},
error::Error,
guard::Guard,
http::{header, Method},
HttpResponse,
};
use futures_core::future::LocalBoxFuture;
use crate::{
named, Directory, DirectoryRenderer, FilesError, HttpService, MimeOverride, NamedFile,
PathBufWrap, PathFilter,
};
/// Assembled file serving service.
#[derive(Clone)]
pub struct FilesService(pub(crate) Rc<FilesServiceInner>);
impl Deref for FilesService {
type Target = FilesServiceInner;
fn deref(&self) -> &Self::Target {
&*self.0
}
}
pub struct FilesServiceInner {
pub(crate) directory: PathBuf,
pub(crate) index: Option<String>,
pub(crate) show_index: bool,
pub(crate) redirect_to_slash: bool,
pub(crate) default: Option<HttpService>,
pub(crate) renderer: Rc<DirectoryRenderer>,
pub(crate) mime_override: Option<Rc<MimeOverride>>,
pub(crate) path_filter: Option<Rc<PathFilter>>,
pub(crate) file_flags: named::Flags,
pub(crate) guards: Option<Rc<dyn Guard>>,
pub(crate) hidden_files: bool,
}
impl fmt::Debug for FilesServiceInner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("FilesServiceInner")
}
}
impl FilesService {
async fn handle_err(
&self,
err: io::Error,
req: ServiceRequest,
) -> Result<ServiceResponse, Error> {
log::debug!("error handling {}: {}", req.path(), err);
if let Some(ref default) = self.default {
default.call(req).await
} else {
Ok(req.error_response(err))
}
}
fn serve_named_file(
&self,
req: ServiceRequest,
mut named_file: NamedFile,
) -> ServiceResponse {
if let Some(ref mime_override) = self.mime_override {
let new_disposition = mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition;
}
named_file.flags = self.file_flags;
let (req, _) = req.into_parts();
let res = named_file.into_response(&req);
ServiceResponse::new(req, res)
}
fn show_index(&self, req: ServiceRequest, path: PathBuf) -> ServiceResponse {
let dir = Directory::new(self.directory.clone(), path);
let (req, _) = req.into_parts();
(self.renderer)(&dir, &req).unwrap_or_else(|e| ServiceResponse::from_err(e, req))
}
}
impl fmt::Debug for FilesService {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("FilesService")
}
}
impl Service<ServiceRequest> for FilesService {
type Response = ServiceResponse<BoxBody>;
type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
dev::always_ready!();
fn call(&self, req: ServiceRequest) -> Self::Future {
let is_method_valid = if let Some(guard) = &self.guards {
// execute user defined guards
(**guard).check(&req.guard_ctx())
} else {
// default behavior
matches!(*req.method(), Method::HEAD | Method::GET)
};
let this = self.clone();
Box::pin(async move {
if !is_method_valid {
return Ok(req.into_response(
HttpResponse::MethodNotAllowed()
.insert_header(header::ContentType(mime::TEXT_PLAIN_UTF_8))
.body("Request did not meet this resource's requirements."),
));
}
let path_on_disk = match PathBufWrap::parse_path(
req.match_info().unprocessed(),
this.hidden_files,
) {
Ok(item) => item,
Err(err) => return Ok(req.error_response(err)),
};
if let Some(filter) = &this.path_filter {
if !filter(path_on_disk.as_ref(), req.head()) {
if let Some(ref default) = this.default {
return default.call(req).await;
} else {
return Ok(req.into_response(HttpResponse::NotFound().finish()));
}
}
}
// full file path
let path = this.directory.join(&path_on_disk);
if let Err(err) = path.canonicalize() {
return this.handle_err(err, req).await;
}
if path.is_dir() {
if this.redirect_to_slash
&& !req.path().ends_with('/')
&& (this.index.is_some() || this.show_index)
{
let redirect_to = format!("{}/", req.path());
return Ok(req.into_response(
HttpResponse::Found()
.insert_header((header::LOCATION, redirect_to))
.finish(),
));
}
match this.index {
Some(ref index) => {
let named_path = path.join(index);
match NamedFile::open_async(named_path).await {
Ok(named_file) => Ok(this.serve_named_file(req, named_file)),
Err(_) if this.show_index => Ok(this.show_index(req, path)),
Err(err) => this.handle_err(err, req).await,
}
}
None if this.show_index => Ok(this.show_index(req, path)),
_ => Ok(ServiceResponse::from_err(
FilesError::IsDirectory,
req.into_parts().0,
)),
}
} else {
match NamedFile::open_async(&path).await {
Ok(mut named_file) => {
if let Some(ref mime_override) = this.mime_override {
let new_disposition =
mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition;
}
named_file.flags = this.file_flags;
let (req, _) = req.into_parts();
let res = named_file.into_response(&req);
Ok(ServiceResponse::new(req, res))
}
Err(err) => this.handle_err(err, req).await,
}
}
})
}
}

View File

@ -0,0 +1,38 @@
use actix_files::Files;
use actix_web::{
http::{
header::{self, HeaderValue},
StatusCode,
},
test::{self, TestRequest},
App,
};
#[actix_web::test]
async fn test_utf8_file_contents() {
// default behavior adds the UTF-8 charset param (otherwise clients assume ISO-8859-1 for text)
let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await;
let req = TestRequest::with_uri("/utf8.txt").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(
res.headers().get(header::CONTENT_TYPE),
Some(&HeaderValue::from_static("text/plain; charset=utf-8")),
);
// disable UTF-8 attribute
let srv =
test::init_service(App::new().service(Files::new("/", "./tests").prefer_utf8(false)))
.await;
let req = TestRequest::with_uri("/utf8.txt").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(
res.headers().get(header::CONTENT_TYPE),
Some(&HeaderValue::from_static("text/plain")),
);
}

View File

@ -0,0 +1 @@
first

View File

@ -0,0 +1 @@
second

View File

@ -0,0 +1,36 @@
use actix_files::Files;
use actix_web::{
guard::Host,
http::StatusCode,
test::{self, TestRequest},
App,
};
use bytes::Bytes;
#[actix_web::test]
async fn test_guard_filter() {
let srv = test::init_service(
App::new()
.service(Files::new("/", "./tests/fixtures/guards/first").guard(Host("first.com")))
.service(
Files::new("/", "./tests/fixtures/guards/second").guard(Host("second.com")),
),
)
.await;
let req = TestRequest::with_uri("/index.txt")
.append_header(("Host", "first.com"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(test::read_body(res).await, Bytes::from("first"));
let req = TestRequest::with_uri("/index.txt")
.append_header(("Host", "second.com"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(test::read_body(res).await, Bytes::from("second"));
}

View File

@ -0,0 +1 @@
test.png

View File

@ -0,0 +1 @@
// this file is empty.

View File

@ -0,0 +1,27 @@
use actix_files::Files;
use actix_web::{
http::StatusCode,
test::{self, TestRequest},
App,
};
#[actix_rt::test]
async fn test_directory_traversal_prevention() {
let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await;
let req =
TestRequest::with_uri("/../../../../../../../../../../../etc/passwd").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri(
"/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/etc/passwd",
)
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri("/%00/etc/passwd%00").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
}

View File

@ -0,0 +1,3 @@
中文内容显示正确。
English is OK.

actix-http-test/CHANGES.md (new file, 138 lines)
View File

@ -0,0 +1,138 @@
# Changes
## Unreleased - 2021-xx-xx
## 3.0.0-beta.11 - 2022-01-04
- Minimum supported Rust version (MSRV) is now 1.54.
## 3.0.0-beta.10 - 2021-12-27
- Update `actix-server` to `2.0.0-rc.2`. [#2550]
[#2550]: https://github.com/actix/actix-web/pull/2550
## 3.0.0-beta.9 - 2021-12-11
- No significant changes since `3.0.0-beta.8`.
## 3.0.0-beta.8 - 2021-11-30
- Update `actix-tls` to `3.0.0-rc.1`. [#2474]
[#2474]: https://github.com/actix/actix-web/pull/2474
## 3.0.0-beta.7 - 2021-11-22
- Fix compatibility with experimental `io-uring` feature of `actix-rt`. [#2408]
[#2408]: https://github.com/actix/actix-web/pull/2408
## 3.0.0-beta.6 - 2021-11-15
- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442]
- Update `actix-server` to `2.0.0-beta.9`. [#2442]
- Minimum supported Rust version (MSRV) is now 1.52.
[#2442]: https://github.com/actix/actix-web/pull/2442
## 3.0.0-beta.5 - 2021-09-09
- Minimum supported Rust version (MSRV) is now 1.51.
## 3.0.0-beta.4 - 2021-04-02
- Added `TestServer::client_headers` method. [#2097]
[#2097]: https://github.com/actix/actix-web/pull/2097
## 3.0.0-beta.3 - 2021-03-09
- No notable changes.
## 3.0.0-beta.2 - 2021-02-10
- No notable changes.
## 3.0.0-beta.1 - 2021-01-07
- Update `bytes` to `1.0`. [#1813]
[#1813]: https://github.com/actix/actix-web/pull/1813
## 2.1.0 - 2020-11-25
- Add ability to set address for `TestServer`. [#1645]
- Upgrade `base64` to `0.13`.
- Upgrade `serde_urlencoded` to `0.7`. [#1773]
[#1773]: https://github.com/actix/actix-web/pull/1773
[#1645]: https://github.com/actix/actix-web/pull/1645
## 2.0.0 - 2020-09-11
- Update actix-codec and actix-utils dependencies.
## 2.0.0-alpha.1 - 2020-05-23
- Update the `time` dependency to 0.2.7
- Update `actix-connect` dependency to 2.0.0-alpha.2
- Make `test_server` `async` fn.
- Bump minimum supported Rust version to 1.40
- Replace deprecated `net2` crate with `socket2`
- Update `base64` dependency to 0.12
- Update `env_logger` dependency to 0.7
## 1.0.0 - 2019-12-13
- Replaced `TestServer::start()` with `test_server()`
## 1.0.0-alpha.3 - 2019-12-07
- Migrate to `std::future`
## 0.2.5 - 2019-09-17
- Update serde_urlencoded to "0.6.1"
- Increase TestServerRuntime timeouts from 500ms to 3000ms
- Do not override current `System`
## 0.2.4 - 2019-07-18
- Update actix-server to 0.6
## 0.2.3 - 2019-07-16
- Add `delete`, `options`, `patch` methods to `TestServerRunner`
## 0.2.2 - 2019-06-16
- Add .put() and .sput() methods
## 0.2.1 - 2019-06-05
- Add license files
## 0.2.0 - 2019-05-12
- Update awc and actix-http deps
## 0.1.1 - 2019-04-24
- Always make new connection for http client
## 0.1.0 - 2019-04-16
- No changes
## 0.1.0-alpha.3 - 2019-04-02
- Request functions accept path #743
## 0.1.0-alpha.2 - 2019-03-29
- Added TestServerRuntime::load_body() method
- Update actix-http and awc libraries
## 0.1.0-alpha.1 - 2019-03-28
- Initial impl

View File

@ -0,0 +1,55 @@
[package]
name = "actix-http-test"
version = "3.0.0-beta.11"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Various helpers for Actix applications to use during testing"
keywords = ["http", "web", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
categories = [
"network-programming",
"asynchronous",
"web-programming::http-server",
"web-programming::websocket",
]
license = "MIT OR Apache-2.0"
edition = "2018"
[package.metadata.docs.rs]
features = []
[lib]
name = "actix_http_test"
path = "src/lib.rs"
[features]
default = []
# openssl
openssl = ["tls-openssl", "awc/openssl"]
[dependencies]
actix-service = "2.0.0"
actix-codec = "0.4.1"
actix-tls = "3.0.0"
actix-utils = "3.0.0"
actix-rt = "2.2"
actix-server = "2"
awc = { version = "3.0.0-beta.18", default-features = false }
base64 = "0.13"
bytes = "1"
futures-core = { version = "0.3.7", default-features = false }
http = "0.2.5"
log = "0.4"
socket2 = "0.4"
serde = "1.0"
serde_json = "1.0"
slab = "0.4"
serde_urlencoded = "0.7"
tls-openssl = { version = "0.10.9", package = "openssl", optional = true }
tokio = { version = "1.8.4", features = ["sync"] }
[dev-dependencies]
actix-web = { version = "4.0.0-beta.20", default-features = false, features = ["cookies"] }
actix-http = "3.0.0-beta.18"

actix-http-test/README.md (new file, 17 lines)
View File

@ -0,0 +1,17 @@
# actix-http-test
> Various helpers for Actix applications to use during testing.
[![crates.io](https://img.shields.io/crates/v/actix-http-test?label=latest)](https://crates.io/crates/actix-http-test)
[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.0.0-beta.11)](https://docs.rs/actix-http-test/3.0.0-beta.11)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http-test)
<br>
[![Dependency Status](https://deps.rs/crate/actix-http-test/3.0.0-beta.11/status.svg)](https://deps.rs/crate/actix-http-test/3.0.0-beta.11)
[![Download](https://img.shields.io/crates/d/actix-http-test.svg)](https://crates.io/crates/actix-http-test)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
## Documentation & Resources
- [API Documentation](https://docs.rs/actix-http-test)
- Minimum Supported Rust Version (MSRV): 1.54
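## Example

A minimal integration-test sketch, assuming the `test_server` helper from this crate and a plain `actix-http` handler (the handler body and test name are illustrative):

```rust
use std::convert::Infallible;

use actix_http::{HttpService, Request, Response, StatusCode};
use actix_http_test::test_server;

#[actix_rt::test]
async fn responds_ok() {
    // start the test server on a random local port
    let mut srv = test_server(|| {
        HttpService::build()
            // answer every request with a plain 200 OK
            .h1(|_req: Request| async move {
                Ok::<_, Infallible>(Response::build(StatusCode::OK).body("hello"))
            })
            .tcp()
    })
    .await;

    let res = srv.get("/").send().await.unwrap();
    assert!(res.status().is_success());

    // wait for the server and system to shut down
    srv.stop().await;
}
```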

View File

@ -1,40 +1,48 @@
//! Various helpers for Actix applications to use during testing.
use std::sync::mpsc;
use std::{net, thread, time};
#![deny(rust_2018_idioms, nonstandard_style)]
#![warn(future_incompatible)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
#[cfg(feature = "openssl")]
extern crate tls_openssl as openssl;
use std::{net, thread, time::Duration};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_rt::{net::TcpStream, System};
use actix_server::{Server, ServiceFactory};
use awc::{error::PayloadError, ws, Client, ClientRequest, ClientResponse, Connector};
use actix_server::{Server, ServerServiceFactory};
use awc::{
error::PayloadError, http::header::HeaderMap, ws, Client, ClientRequest, ClientResponse,
Connector,
};
use bytes::Bytes;
use futures_core::stream::Stream;
use http::Method;
use socket2::{Domain, Protocol, Socket, Type};
use tokio::sync::mpsc;
pub use actix_testing::*;
/// Start test server
/// Start test server.
///
/// `TestServer` is very simple test server that simplify process of writing
/// integration tests cases for actix web applications.
/// `TestServer` is a very simple test server that simplifies the process of writing integration
/// test cases for HTTP applications.
///
/// # Examples
///
/// ```rust
/// ```no_run
/// use actix_http::HttpService;
/// use actix_http_test::TestServer;
/// use actix_http_test::test_server;
/// use actix_web::{web, App, HttpResponse, Error};
///
/// async fn my_handler() -> Result<HttpResponse, Error> {
/// Ok(HttpResponse::Ok().into())
/// }
///
/// #[actix_rt::test]
/// #[actix_web::test]
/// async fn test_example() {
/// let mut srv = TestServer::start(
/// || HttpService::new(
/// App::new().service(
/// web::resource("/").to(my_handler))
/// let mut srv = TestServer::start(||
/// HttpService::new(
/// App::new().service(web::resource("/").to(my_handler))
/// )
/// );
///
@ -43,80 +51,91 @@ pub use actix_testing::*;
/// assert!(response.status().is_success());
/// }
/// ```
pub async fn test_server<F: ServiceFactory<TcpStream>>(factory: F) -> TestServer {
let (tx, rx) = mpsc::channel();
pub async fn test_server<F: ServerServiceFactory<TcpStream>>(factory: F) -> TestServer {
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
test_server_with_addr(tcp, factory).await
}
/// Start [`test server`](test_server()) on an existing address binding.
pub async fn test_server_with_addr<F: ServerServiceFactory<TcpStream>>(
tcp: net::TcpListener,
factory: F,
) -> TestServer {
let (started_tx, started_rx) = std::sync::mpsc::channel();
let (thread_stop_tx, thread_stop_rx) = mpsc::channel(1);
// run server in separate thread
thread::spawn(move || {
let sys = System::new("actix-test-server");
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
let local_addr = tcp.local_addr().unwrap();
System::new().block_on(async move {
let local_addr = tcp.local_addr().unwrap();
Server::build()
.listen("test", tcp, factory)?
.workers(1)
.disable_signals()
.start();
let srv = Server::build()
.workers(1)
.disable_signals()
.system_exit()
.listen("test", tcp, factory)
.expect("test server could not be created");
tx.send((System::current(), local_addr)).unwrap();
sys.run()
let srv = srv.run();
started_tx
.send((System::current(), srv.handle(), local_addr))
.unwrap();
// drive server loop
srv.await.unwrap();
});
// notify TestServer that server and system have shut down
// all thread managed resources should be dropped at this point
let _ = thread_stop_tx.send(());
});
let (system, addr) = rx.recv().unwrap();
let (system, server, addr) = started_rx.recv().unwrap();
let client = {
#[cfg(feature = "openssl")]
let connector = {
#[cfg(feature = "openssl")]
{
use open_ssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
builder.set_verify(SslVerifyMode::NONE);
let _ = builder
.set_alpn_protos(b"\x02h2\x08http/1.1")
.map_err(|e| log::error!("Can not set alpn protocol: {:?}", e));
Connector::new()
.conn_lifetime(time::Duration::from_secs(0))
.timeout(time::Duration::from_millis(30000))
.ssl(builder.build())
.finish()
}
#[cfg(not(feature = "openssl"))]
{
Connector::new()
.conn_lifetime(time::Duration::from_secs(0))
.timeout(time::Duration::from_millis(30000))
.finish()
}
let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
builder.set_verify(SslVerifyMode::NONE);
let _ = builder
.set_alpn_protos(b"\x02h2\x08http/1.1")
.map_err(|e| log::error!("Can not set alpn protocol: {:?}", e));
Connector::new()
.conn_lifetime(Duration::from_secs(0))
.timeout(Duration::from_millis(30000))
.openssl(builder.build())
};
#[cfg(not(feature = "openssl"))]
let connector = {
Connector::new()
.conn_lifetime(Duration::from_secs(0))
.timeout(Duration::from_millis(30000))
};
Client::builder().connector(connector).finish()
};
actix_connect::start_default_resolver().await.unwrap();
TestServer {
addr,
server,
client,
system,
addr,
thread_stop_rx,
}
}
/// Get first available unused address
pub fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket =
Socket::new(Domain::ipv4(), Type::stream(), Some(Protocol::tcp())).unwrap();
socket.bind(&addr.into()).unwrap();
socket.set_reuse_address(true).unwrap();
let tcp = socket.into_tcp_listener();
tcp.local_addr().unwrap()
}
/// Test server controller
pub struct TestServer {
server: actix_server::ServerHandle,
client: awc::Client,
system: actix_rt::System,
addr: net::SocketAddr,
client: Client,
system: System,
thread_stop_rx: mpsc::Receiver<()>,
}
impl TestServer {
@ -134,7 +153,7 @@ impl TestServer {
}
}
/// Construct test https server url
/// Construct test HTTPS server URL.
pub fn surl(&self, uri: &str) -> String {
if uri.starts_with('/') {
format!("https://localhost:{}{}", self.addr.port(), uri)
@ -148,7 +167,7 @@ impl TestServer {
self.client.get(self.url(path.as_ref()).as_str())
}
/// Create https `GET` request
/// Create HTTPS `GET` request
pub fn sget<S: AsRef<str>>(&self, path: S) -> ClientRequest {
self.client.get(self.surl(path.as_ref()).as_str())
}
@ -158,7 +177,7 @@ impl TestServer {
self.client.post(self.url(path.as_ref()).as_str())
}
/// Create https `POST` request
/// Create HTTPS `POST` request
pub fn spost<S: AsRef<str>>(&self, path: S) -> ClientRequest {
self.client.post(self.surl(path.as_ref()).as_str())
}
@ -168,7 +187,7 @@ impl TestServer {
self.client.head(self.url(path.as_ref()).as_str())
}
/// Create https `HEAD` request
/// Create HTTPS `HEAD` request
pub fn shead<S: AsRef<str>>(&self, path: S) -> ClientRequest {
self.client.head(self.surl(path.as_ref()).as_str())
}
@ -178,7 +197,7 @@ impl TestServer {
self.client.put(self.url(path.as_ref()).as_str())
}
/// Create https `PUT` request
/// Create HTTPS `PUT` request
pub fn sput<S: AsRef<str>>(&self, path: S) -> ClientRequest {
self.client.put(self.surl(path.as_ref()).as_str())
}
@ -188,7 +207,7 @@ impl TestServer {
self.client.patch(self.url(path.as_ref()).as_str())
}
/// Create https `PATCH` request
/// Create HTTPS `PATCH` request
pub fn spatch<S: AsRef<str>>(&self, path: S) -> ClientRequest {
self.client.patch(self.surl(path.as_ref()).as_str())
}
@ -198,7 +217,7 @@ impl TestServer {
self.client.delete(self.url(path.as_ref()).as_str())
}
/// Create https `DELETE` request
/// Create HTTPS `DELETE` request
pub fn sdelete<S: AsRef<str>>(&self, path: S) -> ClientRequest {
self.client.delete(self.surl(path.as_ref()).as_str())
}
@ -208,12 +227,12 @@ impl TestServer {
self.client.options(self.url(path.as_ref()).as_str())
}
/// Create https `OPTIONS` request
/// Create HTTPS `OPTIONS` request
pub fn soptions<S: AsRef<str>>(&self, path: S) -> ClientRequest {
self.client.options(self.surl(path.as_ref()).as_str())
}
/// Connect to test http server
/// Connect to test HTTP server
pub fn request<S: AsRef<str>>(&self, method: Method, path: S) -> ClientRequest {
self.client.request(method, path.as_ref())
}
@ -228,33 +247,66 @@ impl TestServer {
response.body().limit(10_485_760).await
}
/// Connect to websocket server at a given path
/// Connect to WebSocket server at a given path.
pub async fn ws_at(
&mut self,
path: &str,
) -> Result<Framed<impl AsyncRead + AsyncWrite, ws::Codec>, awc::error::WsClientError>
{
) -> Result<Framed<impl AsyncRead + AsyncWrite, ws::Codec>, awc::error::WsClientError> {
let url = self.url(path);
let connect = self.client.ws(url).connect();
connect.await.map(|(_, framed)| framed)
}
/// Connect to a websocket server
/// Connect to a WebSocket server.
pub async fn ws(
&mut self,
) -> Result<Framed<impl AsyncRead + AsyncWrite, ws::Codec>, awc::error::WsClientError>
{
) -> Result<Framed<impl AsyncRead + AsyncWrite, ws::Codec>, awc::error::WsClientError> {
self.ws_at("/").await
}
/// Stop http server
fn stop(&mut self) {
/// Get default HeaderMap of Client.
///
/// Returns `Some(&mut HeaderMap)` when the `Client` object is unique, i.e. no other clone of
/// the client exists at the same time.
pub fn client_headers(&mut self) -> Option<&mut HeaderMap> {
self.client.headers()
}
/// Stop HTTP server.
///
/// Waits for spawned `Server` and `System` to (force) shutdown.
pub async fn stop(&mut self) {
// signal server to stop
self.server.stop(false).await;
// also signal system to stop
// though this is handled by `ServerBuilder::exit_system` too
self.system.stop();
// wait for thread to be stopped but don't care about result
let _ = self.thread_stop_rx.recv().await;
}
}
impl Drop for TestServer {
fn drop(&mut self) {
self.stop()
// calls in this Drop impl should be enough to shut down the server, system, and thread
// without needing to await anything
// signal server to stop
let _ = self.server.stop(true);
// signal system to stop
self.system.stop();
}
}
/// Get a localhost socket address with random, unused port.
pub fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = Socket::new(Domain::IPV4, Type::STREAM, Some(Protocol::TCP)).unwrap();
socket.bind(&addr.into()).unwrap();
socket.set_reuse_address(true).unwrap();
let tcp = net::TcpListener::from(socket);
tcp.local_addr().unwrap()
}
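A minimal sketch combining `unused_addr` with `test_server_with_addr`, assuming the helpers defined above; the handler and test name are illustrative:

```rust
use std::{convert::Infallible, net};

use actix_http::{HttpService, Response, StatusCode};
use actix_http_test::{test_server_with_addr, unused_addr};

#[actix_rt::test]
async fn serves_on_pre_bound_listener() {
    // reserve a free local port and bind the listener ourselves
    let addr = unused_addr();
    let listener = net::TcpListener::bind(addr).unwrap();

    let mut srv = test_server_with_addr(listener, || {
        HttpService::build()
            .h1(|_req| async {
                Ok::<_, Infallible>(Response::build(StatusCode::OK).body("ok"))
            })
            .tcp()
    })
    .await;

    // the test client targets the same pre-bound address
    let res = srv.get("/").send().await.unwrap();
    assert!(res.status().is_success());

    srv.stop().await;
}
```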

File diff suppressed because it is too large.

View File

@ -1,46 +0,0 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/

View File

@ -1,21 +1,23 @@
[package]
name = "actix-http"
version = "2.0.0"
version = "3.0.0-beta.18"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix HTTP primitives"
readme = "README.md"
description = "HTTP primitives for the Actix ecosystem"
keywords = ["actix", "http", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-http/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
categories = [
"network-programming",
"asynchronous",
"web-programming::http-server",
"web-programming::websocket",
]
license = "MIT OR Apache-2.0"
edition = "2018"
[package.metadata.docs.rs]
features = ["openssl", "rustls", "compress", "secure-cookies", "actors"]
# features that docs.rs will build with
features = ["openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd"]
[lib]
name = "actix_http"
@ -25,79 +27,84 @@ path = "src/lib.rs"
default = []
# openssl
openssl = ["actix-tls/openssl", "actix-connect/openssl"]
openssl = ["actix-tls/accept", "actix-tls/openssl"]
# rustls support
rustls = ["actix-tls/rustls", "actix-connect/rustls"]
rustls = ["actix-tls/accept", "actix-tls/rustls"]
# enable compressison support
compress = ["flate2", "brotli2"]
# enable compression support
compress-brotli = ["brotli", "__compress"]
compress-gzip = ["flate2", "__compress"]
compress-zstd = ["zstd", "__compress"]
# support for secure cookies
secure-cookies = ["cookie/secure"]
# support for actix Actor messages
actors = ["actix"]
# Internal (PRIVATE!) features used to aid testing and checking feature status.
# Don't rely on these whatsoever. They may disappear at any time.
__compress = []
[dependencies]
actix-service = "1.0.6"
actix-codec = "0.3.0"
actix-connect = "2.0.0"
actix-utils = "2.0.0"
actix-rt = "1.0.0"
actix-threadpool = "0.3.1"
actix-tls = { version = "2.0.0", optional = true }
actix = { version = "0.10.0", optional = true }
actix-service = "2.0.0"
actix-codec = "0.4.1"
actix-utils = "3.0.0"
actix-rt = { version = "2.2", default-features = false }
base64 = "0.12"
ahash = "0.7"
base64 = "0.13"
bitflags = "1.2"
bytes = "0.5.3"
cookie = { version = "0.14.1", features = ["percent-encode"] }
copyless = "0.1.4"
derive_more = "0.99.2"
either = "1.5.3"
bytes = "1"
bytestring = "1"
derive_more = "0.99.5"
encoding_rs = "0.8"
futures-channel = { version = "0.3.5", default-features = false }
futures-core = { version = "0.3.5", default-features = false }
futures-util = { version = "0.3.5", default-features = false }
fxhash = "0.2.1"
h2 = "0.2.1"
http = "0.2.0"
httparse = "1.3"
indexmap = "1.3"
itoa = "0.4"
lazy_static = "1.4"
language-tags = "0.2"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
h2 = "0.3.9"
http = "0.2.5"
httparse = "1.5.1"
httpdate = "1.0.1"
itoa = "1"
language-tags = "0.3"
local-channel = "0.1"
log = "0.4"
mime = "0.3"
percent-encoding = "2.1"
pin-project = "0.4.17"
rand = "0.7"
regex = "1.3"
serde = "1.0"
serde_json = "1.0"
sha-1 = "0.9"
slab = "0.4"
serde_urlencoded = "0.6.1"
time = { version = "0.2.7", default-features = false, features = ["std"] }
pin-project-lite = "0.2"
rand = "0.8"
sha-1 = "0.10"
smallvec = "1.6.1"
# tls
actix-tls = { version = "3.0.0", default-features = false, optional = true }
# compression
brotli2 = { version="0.3.2", optional = true }
brotli = { version = "3.3.3", optional = true }
flate2 = { version = "1.0.13", optional = true }
zstd = { version = "0.9", optional = true }
[dev-dependencies]
actix-server = "1.0.1"
actix-connect = { version = "2.0.0", features = ["openssl"] }
actix-http-test = { version = "2.0.0", features = ["openssl"] }
actix-tls = { version = "2.0.0", features = ["openssl"] }
criterion = "0.3"
env_logger = "0.7"
serde_derive = "1.0"
open-ssl = { version="0.10", package = "openssl" }
rust-tls = { version="0.18", package = "rustls" }
actix-http-test = { version = "3.0.0-beta.11", features = ["openssl"] }
actix-server = "2"
actix-tls = { version = "3.0.0", features = ["openssl"] }
actix-web = "4.0.0-beta.20"
async-stream = "0.3"
criterion = { version = "0.3", features = ["html_reports"] }
env_logger = "0.9"
futures-util = { version = "0.3.7", default-features = false, features = ["alloc"] }
memchr = "2.4"
rcgen = "0.8"
regex = "1.3"
rustls-pemfile = "0.2"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
static_assertions = "1"
tls-openssl = { package = "openssl", version = "0.10.9" }
tls-rustls = { package = "rustls", version = "0.20.0" }
tokio = { version = "1.8.4", features = ["net", "rt", "macros"] }
[[example]]
name = "ws"
required-features = ["rustls"]
[[bench]]
name = "content-length"
name = "write-camel-case"
harness = false
[[bench]]
@ -107,3 +114,7 @@ harness = false
[[bench]]
name = "uninit-headers"
harness = false
[[bench]]
name = "quality-value"
harness = false

View File

@ -1,24 +1,29 @@
# Actix http [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-http)](https://crates.io/crates/actix-http) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# actix-http
Actix http
> HTTP primitives for the Actix ecosystem.
## Documentation & community resources
[![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http)
[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.0.0-beta.18)](https://docs.rs/actix-http/3.0.0-beta.18)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-http/3.0.0-beta.18/status.svg)](https://deps.rs/crate/actix-http/3.0.0-beta.18)
[![Download](https://img.shields.io/crates/d/actix-http.svg)](https://crates.io/crates/actix-http)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
* [User Guide](https://actix.rs/docs/)
* [API Documentation](https://docs.rs/actix-http/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-http](https://crates.io/crates/actix-http)
* Minimum supported Rust version: 1.40 or later
## Documentation & Resources
- [API Documentation](https://docs.rs/actix-http)
- Minimum Supported Rust Version (MSRV): 1.54
## Example
```rust
// see examples/framed_hello.rs for complete list of used crates.
use std::{env, io};
use actix_http::{HttpService, Response};
use actix_server::Server;
use futures::future;
use futures_util::future;
use http::header::HeaderValue;
use log::info;
@ -49,8 +54,8 @@ async fn main() -> io::Result<()> {
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
- MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option.

View File

@ -1,291 +0,0 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use bytes::BytesMut;
// benchmark sending all requests at the same time
fn bench_write_content_length(c: &mut Criterion) {
let mut group = c.benchmark_group("write_content_length");
let sizes = [
0, 1, 11, 83, 101, 653, 1001, 6323, 10001, 56329, 100001, 123456, 98724245,
4294967202,
];
for i in sizes.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_content_length(i, &mut b)
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_content_length(i, &mut b)
})
});
group.bench_with_input(BenchmarkId::new("itoa", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_itoa::write_content_length(i, &mut b)
})
});
}
group.finish();
}
criterion_group!(benches, bench_write_content_length);
criterion_main!(benches);
mod _itoa {
use bytes::{BufMut, BytesMut};
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
let mut buf = itoa::Buffer::new();
bytes.put_slice(b"\r\ncontent-length: ");
bytes.put_slice(buf.format(n).as_bytes());
bytes.put_slice(b"\r\n");
}
}
mod _new {
use bytes::{BufMut, BytesMut};
const DIGITS_START: u8 = b'0';
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
bytes.put_slice(b"\r\ncontent-length: ");
if n < 10 {
bytes.put_u8(DIGITS_START + (n as u8));
} else if n < 100 {
let n = n as u8;
let d10 = n / 10;
let d1 = n % 10;
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1000 {
let n = n as u16;
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 10_000 {
let n = n as u16;
let d1000 = (n / 1000) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 100_000 {
let n = n as u32;
let d10000 = (n / 10000) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1_000_000 {
let n = n as u32;
let d100000 = (n / 100000) as u8;
let d10000 = ((n / 10000) % 10) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100000);
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else {
write_usize(n, bytes);
}
bytes.put_slice(b"\r\n");
}
fn write_usize(n: usize, bytes: &mut BytesMut) {
let mut n = n;
// 20 chars is max length of a usize (2^64)
// digits will be added to the buffer from lsd to msd
let mut buf = BytesMut::with_capacity(20);
while n > 9 {
// "pop" the least-significant digit
let lsd = (n % 10) as u8;
// remove the lsd from n
n = n / 10;
buf.put_u8(DIGITS_START + lsd);
}
// put msd to result buffer
bytes.put_u8(DIGITS_START + (n as u8));
// put, in reverse (msd to lsd), remaining digits to buffer
for i in (0..buf.len()).rev() {
bytes.put_u8(buf[i]);
}
}
}
mod _original {
use std::{mem, ptr, slice};
use bytes::{BufMut, BytesMut};
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
if n < 10 {
let mut buf: [u8; 21] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'\r', b'\n',
];
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else if n < 100 {
let mut buf: [u8; 22] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'\r', b'\n',
];
let d1 = n << 1;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(18),
2,
);
}
bytes.put_slice(&buf);
} else if n < 1000 {
let mut buf: [u8; 23] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'0', b'\r',
b'\n',
];
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(19),
2,
)
};
// decode last 1
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else {
bytes.put_slice(b"\r\ncontent-length: ");
convert_usize(n, bytes);
}
}
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
let mut curr: isize = 39;
let mut buf: [u8; 41] = unsafe { mem::MaybeUninit::uninit().assume_init() };
buf[39] = b'\r';
buf[40] = b'\n';
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
// eagerly decode 4 characters at a time
while n >= 10_000 {
let rem = (n % 10_000) as isize;
n /= 10_000;
let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1;
curr -= 4;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
ptr::copy_nonoverlapping(
lut_ptr.offset(d2),
buf_ptr.offset(curr + 2),
2,
);
}
}
// if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
unsafe {
bytes.extend_from_slice(slice::from_raw_parts(
buf_ptr.offset(curr),
41 - curr as usize,
));
}
}
}

View File

@ -0,0 +1,95 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
const CODES: &[u16] = &[0, 1000, 201, 800, 550];
fn bench_quality_display_impls(c: &mut Criterion) {
let mut group = c.benchmark_group("quality value display impls");
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("New (fast?)", i), i, |b, &i| {
b.iter(|| _new::Quality(i).to_string())
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| _naive::Quality(i).to_string())
});
}
group.finish();
}
criterion_group!(benches, bench_quality_display_impls);
criterion_main!(benches);
mod _new {
use std::fmt;
pub struct Quality(pub(crate) u16);
impl fmt::Display for Quality {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0 {
0 => f.write_str("0"),
1000 => f.write_str("1"),
// some number in the range 1–999
x => {
f.write_str("0.")?;
// this implementation avoids string allocation otherwise required
// for `.trim_end_matches('0')`
if x < 10 {
f.write_str("00")?;
// 0 is handled so it's not possible to have a trailing 0, we can just return
itoa_fmt(f, x)
} else if x < 100 {
f.write_str("0")?;
if x % 10 == 0 {
// trailing 0, divide by 10 and write
itoa_fmt(f, x / 10)
} else {
itoa_fmt(f, x)
}
} else {
// x is in range 101–999
if x % 100 == 0 {
// two trailing 0s, divide by 100 and write
itoa_fmt(f, x / 100)
} else if x % 10 == 0 {
// one trailing 0, divide by 10 and write
itoa_fmt(f, x / 10)
} else {
itoa_fmt(f, x)
}
}
}
}
}
}
pub fn itoa_fmt<W: fmt::Write, V: itoa::Integer>(mut wr: W, value: V) -> fmt::Result {
let mut buf = itoa::Buffer::new();
wr.write_str(buf.format(value))
}
}
mod _naive {
use std::fmt;
pub struct Quality(pub(crate) u16);
impl fmt::Display for Quality {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0 {
0 => f.write_str("0"),
1000 => f.write_str("1"),
x => {
write!(f, "{}", format!("{:03}", x).trim_end_matches('0'))
}
}
}
}
}
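To make the intended rendering concrete, a small illustrative check of the `_new` implementation above (a sketch only, not part of the benchmark; it assumes the thousandths-based q-value encoding used by both modules):

```rust
// illustrative sketch, not part of the benchmark
fn demo_expected_output() {
    assert_eq!(_new::Quality(0).to_string(), "0");
    assert_eq!(_new::Quality(1000).to_string(), "1");
    assert_eq!(_new::Quality(800).to_string(), "0.8"); // trailing zeros are trimmed
    assert_eq!(_new::Quality(201).to_string(), "0.201");
}
```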

View File

@ -176,7 +176,7 @@ mod _original {
buf[5] = b'0';
buf[7] = b'9';
}
_ => (),
_ => {}
}
let mut curr: isize = 12;
@ -189,11 +189,7 @@ mod _original {
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
}
// decode last 1 or 2 chars
@ -206,11 +202,7 @@ mod _original {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
}
}

View File

@ -54,15 +54,10 @@ const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
value: (0, 0),
};
const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] =
[EMPTY_HEADER_INDEX; MAX_HEADERS];
const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] = [EMPTY_HEADER_INDEX; MAX_HEADERS];
impl HeaderIndex {
fn record(
bytes: &[u8],
headers: &[httparse::Header<'_>],
indices: &mut [HeaderIndex],
) {
fn record(bytes: &[u8], headers: &[httparse::Header<'_>], indices: &mut [HeaderIndex]) {
let bytes_ptr = bytes.as_ptr() as usize;
for (header, indices) in headers.iter().zip(indices.iter_mut()) {
let name_start = header.name.as_ptr() as usize - bytes_ptr;
@ -78,12 +73,12 @@ impl HeaderIndex {
// test cases taken from:
// https://github.com/seanmonstar/httparse/blob/master/benches/parse.rs
const REQ_SHORT: &'static [u8] = b"\
const REQ_SHORT: &[u8] = b"\
GET / HTTP/1.0\r\n\
Host: example.com\r\n\
Cookie: session=60; user_id=1\r\n\r\n";
const REQ: &'static [u8] = b"\
const REQ: &[u8] = b"\
GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n\
Host: www.kittyhell.com\r\n\
User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\
@ -119,6 +114,8 @@ mod _original {
use std::mem::MaybeUninit;
pub fn parse_headers(src: &mut BytesMut) -> usize {
#![allow(clippy::uninit_assumed_init)]
let mut headers: [HeaderIndex; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };

View File

@ -0,0 +1,93 @@
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
fn bench_write_camel_case(c: &mut Criterion) {
let mut group = c.benchmark_group("write_camel_case");
let names = ["connection", "Transfer-Encoding", "transfer-encoding"];
for &i in &names {
let bts = i.as_bytes();
group.bench_with_input(BenchmarkId::new("Original", i), bts, |b, bts| {
b.iter(|| {
let mut buf = black_box([0; 24]);
_original::write_camel_case(black_box(bts), &mut buf)
});
});
group.bench_with_input(BenchmarkId::new("New", i), bts, |b, bts| {
b.iter(|| {
let mut buf = black_box([0; 24]);
let len = black_box(bts.len());
_new::write_camel_case(black_box(bts), buf.as_mut_ptr(), len)
});
});
}
group.finish();
}
criterion_group!(benches, bench_write_camel_case);
criterion_main!(benches);
mod _new {
pub fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) {
// first copy entire (potentially wrong) slice to output
let buffer = unsafe {
std::ptr::copy_nonoverlapping(value.as_ptr(), buf, len);
std::slice::from_raw_parts_mut(buf, len)
};
let mut iter = value.iter();
// first character should be uppercase
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[0] = c & 0b1101_1111;
}
// track 1 ahead of the current position since that's the location being assigned to
let mut index = 2;
// remaining characters after hyphens should also be uppercase
while let Some(&c) = iter.next() {
if c == b'-' {
// advance iter by one and uppercase if needed
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[index] = c & 0b1101_1111;
}
}
index += 1;
}
}
}
mod _original {
pub fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
let mut index = 0;
let key = value;
let mut key_iter = key.iter();
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
} else {
return;
}
while let Some(c) = key_iter.next() {
buffer[index] = *c;
index += 1;
if *c == b'-' {
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
}
}
}
}
}
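For reference, an illustrative check of the `_original` implementation's expected output (a sketch only, not part of the benchmark):

```rust
// illustrative sketch, not part of the benchmark
fn demo_expected_output() {
    let mut buf = [0u8; 24];
    _original::write_camel_case(b"transfer-encoding", &mut buf);
    // the first character and each character following a hyphen are uppercased
    assert_eq!(&buf[..17], &b"Transfer-Encoding"[..]);
}
```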

View File

@ -0,0 +1,26 @@
use actix_http::HttpService;
use actix_server::Server;
use actix_service::map_config;
use actix_web::{dev::AppConfig, get, App};
#[get("/")]
async fn index() -> &'static str {
"Hello, world. From Actix Web!"
}
#[tokio::main(flavor = "current_thread")]
async fn main() -> std::io::Result<()> {
Server::build()
.bind("hello-world", "127.0.0.1:8080", || {
// construct actix-web app
let app = App::new().service(index);
HttpService::build()
// pass the app to service builder
// map_config is used to map App's configuration to ServiceBuilder
.finish(map_config(app, |_| AppConfig::default()))
.tcp()
})?
.run()
.await
}

View File

@ -1,35 +1,36 @@
use std::{env, io};
use std::io;
use actix_http::{Error, HttpService, Request, Response};
use actix_http::{Error, HttpService, Request, Response, StatusCode};
use actix_server::Server;
use bytes::BytesMut;
use futures_util::StreamExt;
use futures_util::StreamExt as _;
use http::header::HeaderValue;
use log::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "echo=info");
env_logger::init();
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("echo", "127.0.0.1:8080", || {
.bind("echo", ("127.0.0.1", 8080), || {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
// handles HTTP/1.1 and HTTP/2
.finish(|mut req: Request| async move {
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?);
}
info!("request body: {:?}", body);
Ok::<_, Error>(
Response::Ok()
.header("x-head", HeaderValue::from_static("dummy value!"))
.body(body),
)
log::info!("request body: {:?}", body);
let res = Response::build(StatusCode::OK)
.insert_header(("x-head", HeaderValue::from_static("dummy value!")))
.body(body);
Ok::<_, Error>(res)
})
// No TLS
.tcp()
})?
.run()

View File

@ -1,32 +1,34 @@
use std::{env, io};
use std::io;
use actix_http::http::HeaderValue;
use actix_http::{Error, HttpService, Request, Response};
use actix_server::Server;
use bytes::BytesMut;
use futures_util::StreamExt;
use log::info;
use actix_http::{
body::{BodyStream, MessageBody},
header, Error, HttpMessage, HttpService, Request, Response, StatusCode,
};
async fn handle_request(mut req: Request) -> Result<Response, Error> {
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?)
async fn handle_request(mut req: Request) -> Result<Response<impl MessageBody>, Error> {
let mut res = Response::build(StatusCode::OK);
if let Some(ct) = req.headers().get(header::CONTENT_TYPE) {
res.insert_header((header::CONTENT_TYPE, ct));
}
info!("request body: {:?}", body);
Ok(Response::Ok()
.header("x-head", HeaderValue::from_static("dummy value!"))
.body(body))
// echo request payload stream as (chunked) response body
let res = res.message_body(BodyStream::new(req.payload().take()))?;
Ok(res)
}
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "echo=info");
env_logger::init();
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("echo", "127.0.0.1:8080", || {
HttpService::build().finish(handle_request).tcp()
actix_server::Server::build()
.bind("echo", ("127.0.0.1", 8080), || {
HttpService::build()
// handles HTTP/1.1 only
.h1(handle_request)
// No TLS
.tcp()
})?
.run()
.await

View File

@ -1,26 +1,35 @@
use std::{env, io};
use std::{convert::Infallible, io};
use actix_http::{HttpService, Response};
use actix_http::{
header::HeaderValue, HttpMessage, HttpService, Request, Response, StatusCode,
};
use actix_server::Server;
use futures_util::future;
use http::header::HeaderValue;
use log::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "hello_world=info");
env_logger::init();
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("hello-world", "127.0.0.1:8080", || {
.bind("hello-world", ("127.0.0.1", 8080), || {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
.finish(|_req| {
info!("{:?}", _req);
let mut res = Response::Ok();
res.header("x-head", HeaderValue::from_static("dummy value!"));
future::ok::<_, ()>(res.body("Hello world!"))
.on_connect_ext(|_, ext| {
ext.insert(42u32);
})
.finish(|req: Request| async move {
log::info!("{:?}", req);
let mut res = Response::build(StatusCode::OK);
res.insert_header(("x-head", HeaderValue::from_static("dummy value!")));
let forty_two = req.extensions().get::<u32>().unwrap().to_string();
res.insert_header((
"x-forty-two",
HeaderValue::from_str(&forty_two).unwrap(),
));
Ok::<_, Infallible>(res.body("Hello world!"))
})
.tcp()
})?

View File

@ -0,0 +1,40 @@
//! Example showing response body (chunked) stream erroring.
//!
//! Test using `nc` or `curl`.
//! ```sh
//! $ curl -vN 127.0.0.1:8080
//! $ echo 'GET / HTTP/1.1\n\n' | nc 127.0.0.1 8080
//! ```
use std::{convert::Infallible, io, time::Duration};
use actix_http::{body::BodyStream, HttpService, Response};
use actix_server::Server;
use async_stream::stream;
use bytes::Bytes;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("streaming-error", ("127.0.0.1", 8080), || {
HttpService::build()
.finish(|req| async move {
log::info!("{:?}", req);
let res = Response::ok();
Ok::<_, Infallible>(res.set_body(BodyStream::new(stream! {
yield Ok(Bytes::from("123"));
yield Ok(Bytes::from("456"));
actix_rt::time::sleep(Duration::from_millis(1000)).await;
yield Err(io::Error::new(io::ErrorKind::Other, ""));
})))
})
.tcp()
})?
.run()
.await
}

actix-http/examples/ws.rs (new file, 112 lines)
View File

@ -0,0 +1,112 @@
//! Sets up a WebSocket server over TCP and TLS.
//! Sends a heartbeat message every 4 seconds but does not respond to any incoming frames.
extern crate tls_rustls as rustls;
use std::{
io,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use actix_codec::Encoder;
use actix_http::{body::BodyStream, error::Error, ws, HttpService, Request, Response};
use actix_rt::time::{interval, Interval};
use actix_server::Server;
use bytes::{Bytes, BytesMut};
use bytestring::ByteString;
use futures_core::{ready, Stream};
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("tcp", ("127.0.0.1", 8080), || {
HttpService::build().h1(handler).tcp()
})?
.bind("tls", ("127.0.0.1", 8443), || {
HttpService::build().finish(handler).rustls(tls_config())
})?
.run()
.await
}
async fn handler(req: Request) -> Result<Response<BodyStream<Heartbeat>>, Error> {
log::info!("handshaking");
let mut res = ws::handshake(req.head())?;
// handshake will always fail under HTTP/2
log::info!("responding");
Ok(res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new())))?)
}
struct Heartbeat {
codec: ws::Codec,
interval: Interval,
}
impl Heartbeat {
fn new(codec: ws::Codec) -> Self {
Self {
codec,
interval: interval(Duration::from_secs(4)),
}
}
}
impl Stream for Heartbeat {
type Item = Result<Bytes, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
log::trace!("poll");
ready!(self.as_mut().interval.poll_tick(cx));
let mut buffer = BytesMut::new();
self.as_mut()
.codec
.encode(
ws::Message::Text(ByteString::from_static("hello world")),
&mut buffer,
)
.unwrap();
Poll::Ready(Some(Ok(buffer.freeze())))
}
}
fn tls_config() -> rustls::ServerConfig {
use std::io::BufReader;
use rustls::{Certificate, PrivateKey};
use rustls_pemfile::{certs, pkcs8_private_keys};
let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap();
let cert_file = cert.serialize_pem().unwrap();
let key_file = cert.serialize_private_key_pem();
let cert_file = &mut BufReader::new(cert_file.as_bytes());
let key_file = &mut BufReader::new(key_file.as_bytes());
let cert_chain = certs(cert_file)
.unwrap()
.into_iter()
.map(Certificate)
.collect();
let mut keys = pkcs8_private_keys(key_file).unwrap();
let mut config = rustls::ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(cert_chain, PrivateKey(keys.remove(0)))
.unwrap();
config.alpn_protocols.push(b"http/1.1".to_vec());
config.alpn_protocols.push(b"h2".to_vec());
config
}
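A hedged sketch of a companion client for the plain-TCP endpoint above, using the `awc` WebSocket connector in the same way the test helpers earlier in this diff do (the printing loop is illustrative):

```rust
use futures_util::StreamExt as _;

#[actix_rt::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // connect to the TCP listener started by the WebSocket example server
    let (_res, mut framed) = awc::Client::new()
        .ws("ws://127.0.0.1:8080/")
        .connect()
        .await?;

    // the server pushes a text frame roughly every 4 seconds
    while let Some(frame) = framed.next().await {
        println!("received: {:?}", frame?);
    }

    Ok(())
}
```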

View File

@ -1,5 +0,0 @@
max_width = 89
reorder_imports = true
#wrap_comments = true
#fn_args_density = "Compressed"
#use_small_heuristics = false

View File

@ -1,723 +0,0 @@
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, mem};
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use futures_util::ready;
use pin_project::pin_project;
use crate::error::Error;
#[derive(Debug, PartialEq, Copy, Clone)]
/// Body size hint
pub enum BodySize {
None,
Empty,
Sized(u64),
Stream,
}
impl BodySize {
pub fn is_eof(&self) -> bool {
matches!(self, BodySize::None | BodySize::Empty | BodySize::Sized(0))
}
}
/// A type that implements this trait can be streamed to a peer.
pub trait MessageBody {
fn size(&self) -> BodySize;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>>;
downcast_get_type_id!();
}
downcast!(MessageBody);
impl MessageBody for () {
fn size(&self) -> BodySize {
BodySize::Empty
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
Poll::Ready(None)
}
}
impl<T: MessageBody + Unpin> MessageBody for Box<T> {
fn size(&self) -> BodySize {
self.as_ref().size()
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
Pin::new(self.get_mut().as_mut()).poll_next(cx)
}
}
#[pin_project(project = ResponseBodyProj)]
pub enum ResponseBody<B> {
Body(#[pin] B),
Other(#[pin] Body),
}
impl ResponseBody<Body> {
pub fn into_body<B>(self) -> ResponseBody<B> {
match self {
ResponseBody::Body(b) => ResponseBody::Other(b),
ResponseBody::Other(b) => ResponseBody::Other(b),
}
}
}
impl<B> ResponseBody<B> {
pub fn take_body(&mut self) -> ResponseBody<B> {
std::mem::replace(self, ResponseBody::Other(Body::None))
}
}
impl<B: MessageBody> ResponseBody<B> {
pub fn as_ref(&self) -> Option<&B> {
if let ResponseBody::Body(ref b) = self {
Some(b)
} else {
None
}
}
}
impl<B: MessageBody> MessageBody for ResponseBody<B> {
fn size(&self) -> BodySize {
match self {
ResponseBody::Body(ref body) => body.size(),
ResponseBody::Other(ref body) => body.size(),
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() {
ResponseBodyProj::Body(body) => body.poll_next(cx),
ResponseBodyProj::Other(body) => body.poll_next(cx),
}
}
}
impl<B: MessageBody> Stream for ResponseBody<B> {
type Item = Result<Bytes, Error>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
match self.project() {
ResponseBodyProj::Body(body) => body.poll_next(cx),
ResponseBodyProj::Other(body) => body.poll_next(cx),
}
}
}
#[pin_project(project = BodyProj)]
/// Represents various types of http message body.
pub enum Body {
/// Empty response. `Content-Length` header is not set.
None,
/// Zero sized response body. `Content-Length` header is set to `0`.
Empty,
/// Specific response body.
Bytes(Bytes),
/// Generic message body.
Message(Box<dyn MessageBody + Unpin>),
}
impl Body {
/// Create body from slice (copy)
pub fn from_slice(s: &[u8]) -> Body {
Body::Bytes(Bytes::copy_from_slice(s))
}
/// Create body from generic message body.
pub fn from_message<B: MessageBody + Unpin + 'static>(body: B) -> Body {
Body::Message(Box::new(body))
}
}
impl MessageBody for Body {
fn size(&self) -> BodySize {
match self {
Body::None => BodySize::None,
Body::Empty => BodySize::Empty,
Body::Bytes(ref bin) => BodySize::Sized(bin.len() as u64),
Body::Message(ref body) => body.size(),
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() {
BodyProj::None => Poll::Ready(None),
BodyProj::Empty => Poll::Ready(None),
BodyProj::Bytes(ref mut bin) => {
let len = bin.len();
if len == 0 {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(bin))))
}
}
BodyProj::Message(ref mut body) => Pin::new(body.as_mut()).poll_next(cx),
}
}
}
impl PartialEq for Body {
fn eq(&self, other: &Body) -> bool {
match *self {
Body::None => matches!(*other, Body::None),
Body::Empty => matches!(*other, Body::Empty),
Body::Bytes(ref b) => match *other {
Body::Bytes(ref b2) => b == b2,
_ => false,
},
Body::Message(_) => false,
}
}
}
impl fmt::Debug for Body {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Body::None => write!(f, "Body::None"),
Body::Empty => write!(f, "Body::Empty"),
Body::Bytes(ref b) => write!(f, "Body::Bytes({:?})", b),
Body::Message(_) => write!(f, "Body::Message(_)"),
}
}
}
impl From<&'static str> for Body {
fn from(s: &'static str) -> Body {
Body::Bytes(Bytes::from_static(s.as_ref()))
}
}
impl From<&'static [u8]> for Body {
fn from(s: &'static [u8]) -> Body {
Body::Bytes(Bytes::from_static(s))
}
}
impl From<Vec<u8>> for Body {
fn from(vec: Vec<u8>) -> Body {
Body::Bytes(Bytes::from(vec))
}
}
impl From<String> for Body {
fn from(s: String) -> Body {
s.into_bytes().into()
}
}
impl<'a> From<&'a String> for Body {
fn from(s: &'a String) -> Body {
Body::Bytes(Bytes::copy_from_slice(AsRef::<[u8]>::as_ref(&s)))
}
}
impl From<Bytes> for Body {
fn from(s: Bytes) -> Body {
Body::Bytes(s)
}
}
impl From<BytesMut> for Body {
fn from(s: BytesMut) -> Body {
Body::Bytes(s.freeze())
}
}
impl From<serde_json::Value> for Body {
fn from(v: serde_json::Value) -> Body {
Body::Bytes(v.to_string().into())
}
}
impl<S> From<SizedStream<S>> for Body
where
S: Stream<Item = Result<Bytes, Error>> + Unpin + 'static,
{
fn from(s: SizedStream<S>) -> Body {
Body::from_message(s)
}
}
impl<S, E> From<BodyStream<S, E>> for Body
where
S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
E: Into<Error> + 'static,
{
fn from(s: BodyStream<S, E>) -> Body {
Body::from_message(s)
}
}
impl MessageBody for Bytes {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()))))
}
}
}
impl MessageBody for BytesMut {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze())))
}
}
}
impl MessageBody for &'static str {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(
mem::take(self.get_mut()).as_ref(),
))))
}
}
}
impl MessageBody for Vec<u8> {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(mem::take(self.get_mut())))))
}
}
}
impl MessageBody for String {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(
mem::take(self.get_mut()).into_bytes(),
))))
}
}
}
/// Type representing a streaming body.
/// The response does not contain a `Content-Length` header; an appropriate transfer encoding is used instead.
#[pin_project]
pub struct BodyStream<S: Unpin, E> {
#[pin]
stream: S,
_t: PhantomData<E>,
}
impl<S, E> BodyStream<S, E>
where
S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>,
{
pub fn new(stream: S) -> Self {
BodyStream {
stream,
_t: PhantomData,
}
}
}
impl<S, E> MessageBody for BodyStream<S, E>
where
S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>,
{
fn size(&self) -> BodySize {
BodySize::Stream
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped so that [`BodyStream`]'s transmission is not ended
/// by a zero-length chunk; polling instead proceeds until the underlying
/// [`Stream`] ends.
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream = self.project().stream;
loop {
let stream = stream.as_mut();
return Poll::Ready(match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
opt => opt.map(|res| res.map_err(Into::into)),
});
}
}
}
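// Example sketch: wrapping a chunk stream in `BodyStream` and converting it
// into a `Body` via the `From` impl above. Paths assume this module is
// published as `actix_http::body`, and the error type is the crate's `Error`.
fn streaming_body_example() {
    use actix_http::body::{Body, BodySize, BodyStream, MessageBody};
    use actix_http::Error;
    use bytes::Bytes;
    use futures_util::stream;

    let chunks = stream::iter(vec![
        Ok::<_, Error>(Bytes::from_static(b"chunk one, ")),
        Ok(Bytes::from_static(b"chunk two")),
    ]);

    // Streaming bodies have no known length, so chunked transfer encoding
    // (or connection close) is used instead of a Content-Length header.
    let body: Body = BodyStream::new(chunks).into();
    assert_eq!(body.size(), BodySize::Stream);
}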
/// Type representing a streaming body of known size. This body implementation should be used
/// if the total size of the stream is known. Data is sent as-is without using transfer encoding.
#[pin_project]
pub struct SizedStream<S: Unpin> {
size: u64,
#[pin]
stream: S,
}
impl<S> SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
pub fn new(size: u64, stream: S) -> Self {
SizedStream { size, stream }
}
}
impl<S> MessageBody for SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
fn size(&self) -> BodySize {
BodySize::Sized(self.size as u64)
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped so that [`SizedStream`]'s transmission is not ended
/// by a zero-length chunk; polling instead proceeds until the underlying
/// [`Stream`] ends.
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream: Pin<&mut S> = self.project().stream;
loop {
let stream = stream.as_mut();
return Poll::Ready(match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
val => val,
});
}
}
}
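// Example sketch: a stream with a known total length. Wrapping it in
// `SizedStream` keeps a fixed `Content-Length` instead of chunked transfer
// encoding; paths assume this module is published as `actix_http::body`.
fn sized_stream_example() {
    use actix_http::body::{BodySize, MessageBody, SizedStream};
    use actix_http::Error;
    use bytes::Bytes;
    use futures_util::stream;

    let payload = Bytes::from_static(b"hello world");
    let len = payload.len() as u64;

    // The declared size must match the number of bytes the stream yields.
    let body = SizedStream::new(len, stream::iter(vec![Ok::<_, Error>(payload)]));
    assert_eq!(body.size(), BodySize::Sized(len));
}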
#[cfg(test)]
mod tests {
use super::*;
use futures_util::future::poll_fn;
use futures_util::pin_mut;
use futures_util::stream;
impl Body {
pub(crate) fn get_ref(&self) -> &[u8] {
match *self {
Body::Bytes(ref bin) => &bin,
_ => panic!(),
}
}
}
impl ResponseBody<Body> {
pub(crate) fn get_ref(&self) -> &[u8] {
match *self {
ResponseBody::Body(ref b) => b.get_ref(),
ResponseBody::Other(ref b) => b.get_ref(),
}
}
}
#[actix_rt::test]
async fn test_static_str() {
assert_eq!(Body::from("").size(), BodySize::Sized(0));
assert_eq!(Body::from("test").size(), BodySize::Sized(4));
assert_eq!(Body::from("test").get_ref(), b"test");
assert_eq!("test".size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| Pin::new(&mut "test").poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_static_bytes() {
assert_eq!(Body::from(b"test".as_ref()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b"test".as_ref()).get_ref(), b"test");
assert_eq!(
Body::from_slice(b"test".as_ref()).size(),
BodySize::Sized(4)
);
assert_eq!(Body::from_slice(b"test".as_ref()).get_ref(), b"test");
let sb = Bytes::from(&b"test"[..]);
pin_mut!(sb);
assert_eq!(sb.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| sb.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_vec() {
assert_eq!(Body::from(Vec::from("test")).size(), BodySize::Sized(4));
assert_eq!(Body::from(Vec::from("test")).get_ref(), b"test");
let test_vec = Vec::from("test");
pin_mut!(test_vec);
assert_eq!(test_vec.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| test_vec.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_bytes() {
let b = Bytes::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_bytes_mut() {
let b = BytesMut::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_string() {
let b = "test".to_owned();
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
assert_eq!(Body::from(&b).size(), BodySize::Sized(4));
assert_eq!(Body::from(&b).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_unit() {
assert_eq!(().size(), BodySize::Empty);
assert!(poll_fn(|cx| Pin::new(&mut ()).poll_next(cx))
.await
.is_none());
}
#[actix_rt::test]
async fn test_box() {
let val = Box::new(());
pin_mut!(val);
assert_eq!(val.size(), BodySize::Empty);
assert!(poll_fn(|cx| val.as_mut().poll_next(cx)).await.is_none());
}
#[actix_rt::test]
async fn test_body_eq() {
assert!(
Body::Bytes(Bytes::from_static(b"1"))
== Body::Bytes(Bytes::from_static(b"1"))
);
assert!(Body::Bytes(Bytes::from_static(b"1")) != Body::None);
}
#[actix_rt::test]
async fn test_body_debug() {
assert!(format!("{:?}", Body::None).contains("Body::None"));
assert!(format!("{:?}", Body::Empty).contains("Body::Empty"));
assert!(format!("{:?}", Body::Bytes(Bytes::from_static(b"1"))).contains('1'));
}
#[actix_rt::test]
async fn test_serde_json() {
use serde_json::json;
assert_eq!(
Body::from(serde_json::Value::String("test".into())).size(),
BodySize::Sized(6)
);
assert_eq!(
Body::from(json!({"test-key":"test-value"})).size(),
BodySize::Sized(25)
);
}
mod body_stream {
use super::*;
//use futures::task::noop_waker;
//use futures::stream::once;
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok(Bytes::from(v)) as Result<Bytes, ()>),
));
pin_mut!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
/* Now it does not compile as it should
#[actix_rt::test]
async fn move_pinned_pointer() {
let (sender, receiver) = futures::channel::oneshot::channel();
let mut body_stream = Ok(BodyStream::new(once(async {
let x = Box::new(0i32);
let y = &x;
receiver.await.unwrap();
let _z = **y;
Ok::<_, ()>(Bytes::new())
})));
let waker = noop_waker();
let mut context = Context::from_waker(&waker);
pin_mut!(body_stream);
let _ = body_stream.as_mut().unwrap().poll_next(&mut context);
sender.send(()).unwrap();
let _ = std::mem::replace(&mut body_stream, Err([0; 32])).unwrap().poll_next(&mut context);
}*/
}
mod sized_stream {
use super::*;
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = SizedStream::new(
2,
stream::iter(["1", "", "2"].iter().map(|&v| Ok(Bytes::from(v)))),
);
pin_mut!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
}
#[actix_rt::test]
async fn test_body_casting() {
let mut body = String::from("hello cast");
let resp_body: &mut dyn MessageBody = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push('!');
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
}


@ -0,0 +1,215 @@
use std::{
error::Error as StdError,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use futures_core::{ready, Stream};
use pin_project_lite::pin_project;
use super::{BodySize, MessageBody};
pin_project! {
/// Streaming response wrapper.
///
/// Response does not contain `Content-Length` header and appropriate transfer encoding is used.
pub struct BodyStream<S> {
#[pin]
stream: S,
}
}
// TODO: from_infallible method
impl<S, E> BodyStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
#[inline]
pub fn new(stream: S) -> Self {
BodyStream { stream }
}
}
impl<S, E> MessageBody for BodyStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
type Error = E;
#[inline]
fn size(&self) -> BodySize {
BodySize::Stream
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped so that [`BodyStream`]'s transmission is not ended
/// by a zero-length chunk; polling instead proceeds until the underlying
/// [`Stream`] ends.
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
loop {
let stream = self.as_mut().project().stream;
let chunk = match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
opt => opt,
};
return Poll::Ready(chunk);
}
}
}
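// Example sketch: constructing the wrapper from an in-memory chunk stream and
// collecting it with `to_bytes`; paths assume the public `actix_http::body`
// module of the released crate.
async fn body_stream_example() {
    use std::convert::Infallible;

    use actix_http::body::{to_bytes, BodySize, BodyStream, MessageBody as _};
    use bytes::Bytes;
    use futures_util::stream;

    let body = BodyStream::new(stream::iter(vec![
        Ok::<_, Infallible>(Bytes::from_static(b"hello ")),
        Ok(Bytes::from_static(b"world")),
    ]));

    // The total length is unknown up front, so the size hint is `Stream`.
    assert_eq!(body.size(), BodySize::Stream);

    // `to_bytes` polls the body to completion and concatenates the chunks.
    assert_eq!(to_bytes(body).await.unwrap(), Bytes::from_static(b"hello world"));
}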
#[cfg(test)]
mod tests {
use std::{convert::Infallible, time::Duration};
use actix_rt::{
pin,
time::{sleep, Sleep},
};
use actix_utils::future::poll_fn;
use derive_more::{Display, Error};
use futures_core::ready;
use futures_util::{stream, FutureExt as _};
use pin_project_lite::pin_project;
use static_assertions::{assert_impl_all, assert_not_impl_all};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, crate::Error>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_all!(BodyStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_all!(BodyStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone
assert_not_impl_all!(BodyStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
));
pin!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
#[actix_rt::test]
async fn read_to_bytes() {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
));
assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12")));
}
#[derive(Debug, Display, Error)]
#[display(fmt = "stream error")]
struct StreamErr;
#[actix_rt::test]
async fn stream_immediate_error() {
let body = BodyStream::new(stream::once(async { Err(StreamErr) }));
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
}
#[actix_rt::test]
async fn stream_string_error() {
// `&'static str` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = BodyStream::new(stream::once(async { Err("stringy error") }));
assert!(matches!(to_bytes(body).await, Err("stringy error")));
}
#[actix_rt::test]
async fn stream_boxed_error() {
// `Box<dyn Error>` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = BodyStream::new(stream::once(async {
Err(Box::<dyn StdError>::from("stringy error"))
}));
assert_eq!(
to_bytes(body).await.unwrap_err().to_string(),
"stringy error"
);
}
#[actix_rt::test]
async fn stream_delayed_error() {
let body = BodyStream::new(stream::iter(vec![Ok(Bytes::from("1")), Err(StreamErr)]));
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
pin_project! {
#[derive(Debug)]
#[project = TimeDelayStreamProj]
enum TimeDelayStream {
Start,
Sleep { delay: Pin<Box<Sleep>> },
Done,
}
}
impl Stream for TimeDelayStream {
type Item = Result<Bytes, StreamErr>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
match self.as_mut().get_mut() {
TimeDelayStream::Start => {
let sleep = sleep(Duration::from_millis(1));
self.as_mut().set(TimeDelayStream::Sleep {
delay: Box::pin(sleep),
});
cx.waker().wake_by_ref();
Poll::Pending
}
TimeDelayStream::Sleep { ref mut delay } => {
ready!(delay.poll_unpin(cx));
self.set(TimeDelayStream::Done);
cx.waker().wake_by_ref();
Poll::Pending
}
TimeDelayStream::Done => Poll::Ready(Some(Err(StreamErr))),
}
}
}
let body = BodyStream::new(TimeDelayStream::Start);
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
}
}


@ -0,0 +1,127 @@
use std::{
error::Error as StdError,
fmt,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use super::{BodySize, MessageBody, MessageBodyMapErr};
use crate::body;
/// A boxed message body with boxed errors.
#[derive(Debug)]
pub struct BoxBody(BoxBodyInner);
enum BoxBodyInner {
None(body::None),
Bytes(Bytes),
Stream(Pin<Box<dyn MessageBody<Error = Box<dyn StdError>>>>),
}
impl fmt::Debug for BoxBodyInner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None(arg0) => f.debug_tuple("None").field(arg0).finish(),
Self::Bytes(arg0) => f.debug_tuple("Bytes").field(arg0).finish(),
Self::Stream(_) => f.debug_tuple("Stream").field(&"dyn MessageBody").finish(),
}
}
}
impl BoxBody {
/// Same as `MessageBody::boxed`.
///
/// If the body type to wrap is unknown or generic it is better to use [`MessageBody::boxed`] to
/// avoid double boxing.
#[inline]
pub fn new<B>(body: B) -> Self
where
B: MessageBody + 'static,
{
match body.size() {
BodySize::None => Self(BoxBodyInner::None(body::None)),
_ => match body.try_into_bytes() {
Ok(bytes) => Self(BoxBodyInner::Bytes(bytes)),
Err(body) => {
let body = MessageBodyMapErr::new(body, Into::into);
Self(BoxBodyInner::Stream(Box::pin(body)))
}
},
}
}
/// Returns a mutable pinned reference to the inner message body type.
#[inline]
pub fn as_pin_mut(&mut self) -> Pin<&mut Self> {
Pin::new(self)
}
}
impl MessageBody for BoxBody {
type Error = Box<dyn StdError>;
#[inline]
fn size(&self) -> BodySize {
match &self.0 {
BoxBodyInner::None(none) => none.size(),
BoxBodyInner::Bytes(bytes) => bytes.size(),
BoxBodyInner::Stream(stream) => stream.size(),
}
}
#[inline]
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match &mut self.0 {
BoxBodyInner::None(body) => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
BoxBodyInner::Bytes(body) => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
BoxBodyInner::Stream(body) => Pin::new(body).poll_next(cx),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self.0 {
BoxBodyInner::None(body) => Ok(body.try_into_bytes().unwrap()),
BoxBodyInner::Bytes(body) => Ok(body.try_into_bytes().unwrap()),
_ => Err(self),
}
}
#[inline]
fn boxed(self) -> BoxBody {
self
}
}
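// Example sketch: erasing a concrete body type behind `BoxBody`. As noted
// above, prefer calling `.boxed()` in generic code to avoid double boxing;
// paths assume the public `actix_http::body` module.
fn box_body_example() {
    use actix_http::body::{BodySize, BoxBody, MessageBody as _};
    use bytes::Bytes;

    // Complete bodies are stored as `Bytes` internally, so no boxed stream is
    // allocated for this case.
    let body = BoxBody::new(Bytes::from_static(b"boxed"));
    assert_eq!(body.size(), BodySize::Sized(5));

    // The bytes-backed variant can be recovered without polling.
    assert_eq!(body.try_into_bytes().unwrap(), Bytes::from_static(b"boxed"));
}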
#[cfg(test)]
mod tests {
use static_assertions::{assert_impl_all, assert_not_impl_all};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(BoxBody: MessageBody, fmt::Debug, Unpin);
assert_not_impl_all!(BoxBody: Send, Sync, Unpin);
#[actix_rt::test]
async fn nested_boxed_body() {
let body = Bytes::from_static(&[1, 2, 3]);
let boxed_body = BoxBody::new(BoxBody::new(body));
assert_eq!(
to_bytes(boxed_body).await.unwrap(),
Bytes::from(vec![1, 2, 3]),
);
}
}


@ -0,0 +1,108 @@
use std::{
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use pin_project_lite::pin_project;
use super::{BodySize, BoxBody, MessageBody};
use crate::Error;
pin_project! {
#[project = EitherBodyProj]
#[derive(Debug, Clone)]
pub enum EitherBody<L, R = BoxBody> {
/// A body of type `L`.
Left { #[pin] body: L },
/// A body of type `R`.
Right { #[pin] body: R },
}
}
impl<L> EitherBody<L, BoxBody> {
/// Creates new `EitherBody` using left variant and boxed right variant.
#[inline]
pub fn new(body: L) -> Self {
Self::Left { body }
}
}
impl<L, R> EitherBody<L, R> {
/// Creates new `EitherBody` using left variant.
#[inline]
pub fn left(body: L) -> Self {
Self::Left { body }
}
/// Creates new `EitherBody` using right variant.
#[inline]
pub fn right(body: R) -> Self {
Self::Right { body }
}
}
impl<L, R> MessageBody for EitherBody<L, R>
where
L: MessageBody + 'static,
R: MessageBody + 'static,
{
type Error = Error;
#[inline]
fn size(&self) -> BodySize {
match self {
EitherBody::Left { body } => body.size(),
EitherBody::Right { body } => body.size(),
}
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match self.project() {
EitherBodyProj::Left { body } => body
.poll_next(cx)
.map_err(|err| Error::new_body().with_cause(err)),
EitherBodyProj::Right { body } => body
.poll_next(cx)
.map_err(|err| Error::new_body().with_cause(err)),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self {
EitherBody::Left { body } => body
.try_into_bytes()
.map_err(|body| EitherBody::Left { body }),
EitherBody::Right { body } => body
.try_into_bytes()
.map_err(|body| EitherBody::Right { body }),
}
}
#[inline]
fn boxed(self) -> BoxBody {
match self {
EitherBody::Left { body } => body.boxed(),
EitherBody::Right { body } => body.boxed(),
}
}
}
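// Example sketch: a middleware-style choice between passing the original body
// through (left) and substituting a boxed replacement (right); the function
// shape is illustrative, and paths assume the public `actix_http::body` module.
fn pick_body(use_original: bool, original: String) -> actix_http::body::EitherBody<String> {
    use actix_http::body::{BoxBody, EitherBody};

    if use_original {
        // The left variant keeps the concrete type `String`.
        EitherBody::new(original)
    } else {
        // The right variant defaults to `BoxBody`, so any body type fits here.
        EitherBody::right(BoxBody::new("substituted response"))
    }
}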
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn type_parameter_inference() {
let _body: EitherBody<(), _> = EitherBody::new(());
let _body: EitherBody<_, ()> = EitherBody::left(());
let _body: EitherBody<(), _> = EitherBody::right(());
}
}


@ -0,0 +1,554 @@
//! [`MessageBody`] trait and foreign implementations.
use std::{
convert::Infallible,
error::Error as StdError,
mem,
pin::Pin,
task::{Context, Poll},
};
use bytes::{Bytes, BytesMut};
use futures_core::ready;
use pin_project_lite::pin_project;
use super::{BodySize, BoxBody};
/// An interface for types that can be converted to bytes and used as response bodies.
// TODO: examples
pub trait MessageBody {
/// The type of error that will be returned if streaming body fails.
///
/// Since it is not appropriate to generate a response mid-stream, it only requires `Error` for
/// internal use and logging.
type Error: Into<Box<dyn StdError>>;
/// Body size hint.
///
/// If [`BodySize::None`] is returned, optimizations that skip reading the body are allowed.
fn size(&self) -> BodySize;
/// Attempt to pull out the next chunk of body bytes.
// TODO: expand documentation
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>>;
/// Try to convert into the complete chunk of body bytes.
///
/// Implement this method if the entire body can be trivially extracted. This is useful for
/// optimizations where `poll_next` calls can be avoided.
///
/// Body types with [`BodySize::None`] are allowed to return empty `Bytes`, though callers are
/// recommended to check `size` first and return early rather than calling this method.
///
/// # Errors
/// The default implementation will error and return the original type back to the caller for
/// further use.
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self>
where
Self: Sized,
{
Err(self)
}
/// Converts this body into `BoxBody`.
#[inline]
fn boxed(self) -> BoxBody
where
Self: Sized + 'static,
{
BoxBody::new(self)
}
}
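// Example sketch of a custom implementor (an assumed type, not part of the
// crate): a body that yields one fixed chunk and then ends. Only `size` and
// `poll_next` are required; `try_into_bytes` and `boxed` have usable defaults.
struct OneShot(Option<Bytes>);

impl MessageBody for OneShot {
    type Error = Infallible;

    fn size(&self) -> BodySize {
        match &self.0 {
            Some(bytes) => BodySize::Sized(bytes.len() as u64),
            None => BodySize::Sized(0),
        }
    }

    fn poll_next(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Bytes, Self::Error>>> {
        // `OneShot` only holds an `Option<Bytes>`, so it is `Unpin` and the
        // pinned reference can safely be unwrapped with `get_mut`.
        Poll::Ready(self.get_mut().0.take().map(Ok))
    }
}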
mod foreign_impls {
use super::*;
impl MessageBody for Infallible {
type Error = Infallible;
fn size(&self) -> BodySize {
match *self {}
}
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match *self {}
}
}
impl MessageBody for () {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(0)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Poll::Ready(None)
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::new())
}
}
impl<B> MessageBody for Box<B>
where
B: MessageBody + Unpin + ?Sized,
{
type Error = B::Error;
#[inline]
fn size(&self) -> BodySize {
self.as_ref().size()
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Pin::new(self.get_mut().as_mut()).poll_next(cx)
}
}
impl<B> MessageBody for Pin<Box<B>>
where
B: MessageBody + ?Sized,
{
type Error = B::Error;
#[inline]
fn size(&self) -> BodySize {
self.as_ref().size()
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
self.get_mut().as_mut().poll_next(cx)
}
}
impl MessageBody for &'static [u8] {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(mem::take(self.get_mut())))))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from_static(self))
}
}
impl MessageBody for Bytes {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()))))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(self)
}
}
impl MessageBody for BytesMut {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze())))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(self.freeze())
}
}
impl MessageBody for Vec<u8> {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).into())))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from(self))
}
}
impl MessageBody for &'static str {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let string = mem::take(self.get_mut());
let bytes = Bytes::from_static(string.as_bytes());
Poll::Ready(Some(Ok(bytes)))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from_static(self.as_bytes()))
}
}
impl MessageBody for String {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let string = mem::take(self.get_mut());
Poll::Ready(Some(Ok(Bytes::from(string))))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from(self))
}
}
impl MessageBody for bytestring::ByteString {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
let string = mem::take(self.get_mut());
Poll::Ready(Some(Ok(string.into_bytes())))
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(self.into_bytes())
}
}
}
pin_project! {
pub(crate) struct MessageBodyMapErr<B, F> {
#[pin]
body: B,
mapper: Option<F>,
}
}
impl<B, F, E> MessageBodyMapErr<B, F>
where
B: MessageBody,
F: FnOnce(B::Error) -> E,
{
pub(crate) fn new(body: B, mapper: F) -> Self {
Self {
body,
mapper: Some(mapper),
}
}
}
impl<B, F, E> MessageBody for MessageBodyMapErr<B, F>
where
B: MessageBody,
F: FnOnce(B::Error) -> E,
E: Into<Box<dyn StdError>>,
{
type Error = E;
#[inline]
fn size(&self) -> BodySize {
self.body.size()
}
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
let this = self.as_mut().project();
match ready!(this.body.poll_next(cx)) {
Some(Err(err)) => {
let f = self.as_mut().project().mapper.take().unwrap();
let mapped_err = (f)(err);
Poll::Ready(Some(Err(mapped_err)))
}
Some(Ok(val)) => Poll::Ready(Some(Ok(val))),
None => Poll::Ready(None),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
let Self { body, mapper } = self;
body.try_into_bytes().map_err(|body| Self { body, mapper })
}
}
#[cfg(test)]
mod tests {
use actix_rt::pin;
use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut};
use super::*;
use crate::body::{self, EitherBody};
macro_rules! assert_poll_next {
($pin:expr, $exp:expr) => {
assert_eq!(
poll_fn(|cx| $pin.as_mut().poll_next(cx))
.await
.unwrap() // unwrap option
.unwrap(), // unwrap result
$exp
);
};
}
macro_rules! assert_poll_next_none {
($pin:expr) => {
assert!(poll_fn(|cx| $pin.as_mut().poll_next(cx)).await.is_none());
};
}
#[actix_rt::test]
async fn boxing_equivalence() {
assert_eq!(().size(), BodySize::Sized(0));
assert_eq!(().size(), Box::new(()).size());
assert_eq!(().size(), Box::pin(()).size());
let pl = Box::new(());
pin!(pl);
assert_poll_next_none!(pl);
let mut pl = Box::pin(());
assert_poll_next_none!(pl);
}
#[actix_rt::test]
async fn test_unit() {
let pl = ();
assert_eq!(pl.size(), BodySize::Sized(0));
pin!(pl);
assert_poll_next_none!(pl);
}
#[actix_rt::test]
async fn test_static_str() {
assert_eq!("".size(), BodySize::Sized(0));
assert_eq!("test".size(), BodySize::Sized(4));
let pl = "test";
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_static_bytes() {
assert_eq!(b"".as_ref().size(), BodySize::Sized(0));
assert_eq!(b"test".as_ref().size(), BodySize::Sized(4));
let pl = b"test".as_ref();
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_vec() {
assert_eq!(vec![0; 0].size(), BodySize::Sized(0));
assert_eq!(Vec::from("test").size(), BodySize::Sized(4));
let pl = Vec::from("test");
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_bytes() {
assert_eq!(Bytes::new().size(), BodySize::Sized(0));
assert_eq!(Bytes::from_static(b"test").size(), BodySize::Sized(4));
let pl = Bytes::from_static(b"test");
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_bytes_mut() {
assert_eq!(BytesMut::new().size(), BodySize::Sized(0));
assert_eq!(BytesMut::from(b"test".as_ref()).size(), BodySize::Sized(4));
let pl = BytesMut::from("test");
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_string() {
assert_eq!(String::new().size(), BodySize::Sized(0));
assert_eq!("test".to_owned().size(), BodySize::Sized(4));
let pl = "test".to_owned();
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn complete_body_combinators() {
let body = Bytes::from_static(b"test");
let body = BoxBody::new(body);
let body = EitherBody::<_, ()>::left(body);
let body = EitherBody::<(), _>::right(body);
// Do not support try_into_bytes:
// let body = Box::new(body);
// let body = Box::pin(body);
assert_eq!(body.try_into_bytes().unwrap(), Bytes::from("test"));
}
#[actix_rt::test]
async fn complete_body_combinators_poll() {
let body = Bytes::from_static(b"test");
let body = BoxBody::new(body);
let body = EitherBody::<_, ()>::left(body);
let body = EitherBody::<(), _>::right(body);
let mut body = body;
assert_eq!(body.size(), BodySize::Sized(4));
assert_poll_next!(Pin::new(&mut body), Bytes::from("test"));
assert_poll_next_none!(Pin::new(&mut body));
}
#[actix_rt::test]
async fn none_body_combinators() {
fn none_body() -> BoxBody {
let body = body::None;
let body = BoxBody::new(body);
let body = EitherBody::<_, ()>::left(body);
let body = EitherBody::<(), _>::right(body);
body.boxed()
}
assert_eq!(none_body().size(), BodySize::None);
assert_eq!(none_body().try_into_bytes().unwrap(), Bytes::new());
assert_poll_next_none!(Pin::new(&mut none_body()));
}
// down-casting used to be done with a method on MessageBody trait
// test is kept to demonstrate equivalence of Any trait
#[actix_rt::test]
async fn test_body_casting() {
let mut body = String::from("hello cast");
// let mut resp_body: &mut dyn MessageBody<Error = Error> = &mut body;
let resp_body: &mut dyn std::any::Any = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push('!');
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
}


@ -0,0 +1,20 @@
//! Traits and structures to aid consuming and writing HTTP payloads.
mod body_stream;
mod boxed;
mod either;
mod message_body;
mod none;
mod size;
mod sized_stream;
mod utils;
pub use self::body_stream::BodyStream;
pub use self::boxed::BoxBody;
pub use self::either::EitherBody;
pub use self::message_body::MessageBody;
pub(crate) use self::message_body::MessageBodyMapErr;
pub use self::none::None;
pub use self::size::BodySize;
pub use self::sized_stream::SizedStream;
pub use self::utils::to_bytes;


@ -0,0 +1,48 @@
use std::{
convert::Infallible,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use super::{BodySize, MessageBody};
/// Body type for responses that forbid payloads.
///
/// Distinct from an empty response which would contain a Content-Length header.
///
/// For an "empty" body, use `()` or `Bytes::new()`.
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct None;
impl None {
/// Constructs new "none" body.
#[inline]
pub fn new() -> Self {
None
}
}
impl MessageBody for None {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::None
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Poll::Ready(Option::None)
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::new())
}
}
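// Example sketch contrasting `None` with an "empty" body; paths assume the
// public `actix_http::body` module of the released crate.
fn none_vs_empty() {
    use actix_http::body::{BodySize, MessageBody as _};

    // `None`: no payload at all, and no Content-Length header is emitted.
    assert_eq!(actix_http::body::None::new().size(), BodySize::None);

    // An "empty" body still reports a size and gets `Content-Length: 0`.
    assert_eq!(().size(), BodySize::Sized(0));
}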


@ -0,0 +1,41 @@
/// Body size hint.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BodySize {
/// Implicitly empty body.
///
/// Will omit the Content-Length header. Used for responses to certain methods (e.g., `HEAD`) or
/// with particular status codes (e.g., 204 No Content). Consumers that read this as a body size
/// hint are allowed to make optimizations that skip reading or writing the payload.
None,
/// Known size body.
///
/// Will write `Content-Length: N` header.
Sized(u64),
/// Unknown size body.
///
/// Will not write Content-Length header. Can be used with chunked Transfer-Encoding.
Stream,
}
impl BodySize {
/// Equivalent to `BodySize::Sized(0)`.
pub const ZERO: Self = Self::Sized(0);
/// Returns true if size hint indicates omitted or empty body.
///
/// Streams will return false because their emptiness cannot be known without reading the stream.
///
/// ```
/// # use actix_http::body::BodySize;
/// assert!(BodySize::None.is_eof());
/// assert!(BodySize::Sized(0).is_eof());
///
/// assert!(!BodySize::Sized(64).is_eof());
/// assert!(!BodySize::Stream.is_eof());
/// ```
pub fn is_eof(&self) -> bool {
matches!(self, BodySize::None | BodySize::Sized(0))
}
}


@ -0,0 +1,171 @@
use std::{
error::Error as StdError,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use futures_core::{ready, Stream};
use pin_project_lite::pin_project;
use super::{BodySize, MessageBody};
pin_project! {
/// Known sized streaming response wrapper.
///
/// This body implementation should be used if total size of stream is known. Data is sent as-is
/// without using chunked transfer encoding.
pub struct SizedStream<S> {
size: u64,
#[pin]
stream: S,
}
}
impl<S, E> SizedStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
#[inline]
pub fn new(size: u64, stream: S) -> Self {
SizedStream { size, stream }
}
}
// TODO: from_infallible method
impl<S, E> MessageBody for SizedStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
type Error = E;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.size as u64)
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped so that [`SizedStream`]'s transmission is not ended
/// by a zero-length chunk; polling instead proceeds until the underlying
/// [`Stream`] ends.
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
loop {
let stream = self.as_mut().project().stream;
let chunk = match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
val => val,
};
return Poll::Ready(chunk);
}
}
}
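// Example sketch: when the total payload length is known ahead of time,
// `SizedStream` keeps a fixed `Content-Length` while still streaming chunks;
// paths assume the public `actix_http::body` module of the released crate.
async fn sized_stream_example() {
    use std::convert::Infallible;

    use actix_http::body::{to_bytes, BodySize, MessageBody as _, SizedStream};
    use bytes::Bytes;
    use futures_util::stream;

    let chunks = stream::iter(vec![
        Ok::<_, Infallible>(Bytes::from_static(b"12")),
        Ok(Bytes::from_static(b"34")),
    ]);

    // The declared size must match the number of bytes the stream yields.
    let body = SizedStream::new(4, chunks);
    assert_eq!(body.size(), BodySize::Sized(4));
    assert_eq!(to_bytes(body).await.unwrap(), Bytes::from_static(b"1234"));
}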
#[cfg(test)]
mod tests {
use std::convert::Infallible;
use actix_rt::pin;
use actix_utils::future::poll_fn;
use futures_util::stream;
use static_assertions::{assert_impl_all, assert_not_impl_all};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, crate::Error>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_all!(SizedStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_all!(SizedStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone
assert_not_impl_all!(SizedStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = SizedStream::new(
2,
stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
),
);
pin!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
#[actix_rt::test]
async fn read_to_bytes() {
let body = SizedStream::new(
2,
stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
),
);
assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12")));
}
#[actix_rt::test]
async fn stream_string_error() {
// `&'static str` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = SizedStream::new(0, stream::once(async { Err("stringy error") }));
assert_eq!(to_bytes(body).await, Ok(Bytes::new()));
let body = SizedStream::new(1, stream::once(async { Err("stringy error") }));
assert!(matches!(to_bytes(body).await, Err("stringy error")));
}
#[actix_rt::test]
async fn stream_boxed_error() {
// `Box<dyn Error>` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = SizedStream::new(
0,
stream::once(async { Err(Box::<dyn StdError>::from("stringy error")) }),
);
assert_eq!(to_bytes(body).await.unwrap(), Bytes::new());
let body = SizedStream::new(
1,
stream::once(async { Err(Box::<dyn StdError>::from("stringy error")) }),
);
assert_eq!(
to_bytes(body).await.unwrap_err().to_string(),
"stringy error"
);
}
}


@ -0,0 +1,77 @@
use std::task::Poll;
use actix_rt::pin;
use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut};
use futures_core::ready;
use super::{BodySize, MessageBody};
/// Collects the body produced by a `MessageBody` implementation into `Bytes`.
///
/// Any errors produced by the body stream are returned immediately.
///
/// # Examples
/// ```
/// use actix_http::body::{self, to_bytes};
/// use bytes::Bytes;
///
/// # async fn test_to_bytes() {
/// let body = body::None::new();
/// let bytes = to_bytes(body).await.unwrap();
/// assert!(bytes.is_empty());
///
/// let body = Bytes::from_static(b"123");
/// let bytes = to_bytes(body).await.unwrap();
/// assert_eq!(bytes, b"123"[..]);
/// # }
/// ```
pub async fn to_bytes<B: MessageBody>(body: B) -> Result<Bytes, B::Error> {
let cap = match body.size() {
BodySize::None | BodySize::Sized(0) => return Ok(Bytes::new()),
BodySize::Sized(size) => size as usize,
// good enough first guess for chunk size
BodySize::Stream => 32_768,
};
let mut buf = BytesMut::with_capacity(cap);
pin!(body);
poll_fn(|cx| loop {
let body = body.as_mut();
match ready!(body.poll_next(cx)) {
Some(Ok(bytes)) => buf.extend_from_slice(&*bytes),
None => return Poll::Ready(Ok(())),
Some(Err(err)) => return Poll::Ready(Err(err)),
}
})
.await?;
Ok(buf.freeze())
}
#[cfg(test)]
mod test {
use futures_util::{stream, StreamExt as _};
use super::*;
use crate::{body::BodyStream, Error};
#[actix_rt::test]
async fn test_to_bytes() {
let bytes = to_bytes(()).await.unwrap();
assert!(bytes.is_empty());
let body = Bytes::from_static(b"123");
let bytes = to_bytes(body).await.unwrap();
assert_eq!(bytes, b"123"[..]);
let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")])
.map(Ok::<_, Error>);
let body = BodyStream::new(stream);
let bytes = to_bytes(body).await.unwrap();
assert_eq!(bytes, b"123abc"[..]);
}
}


@ -1,25 +1,22 @@
use std::marker::PhantomData;
use std::rc::Rc;
use std::{fmt, net};
use std::{fmt, marker::PhantomData, net, rc::Rc};
use actix_codec::Framed;
use actix_service::{IntoServiceFactory, Service, ServiceFactory};
use crate::body::MessageBody;
use crate::config::{KeepAlive, ServiceConfig};
use crate::error::Error;
use crate::h1::{Codec, ExpectHandler, H1Service, UpgradeHandler};
use crate::h2::H2Service;
use crate::helpers::{Data, DataFactory};
use crate::request::Request;
use crate::response::Response;
use crate::service::HttpService;
use crate::{
body::{BoxBody, MessageBody},
config::{KeepAlive, ServiceConfig},
h1::{self, ExpectHandler, H1Service, UpgradeHandler},
h2::H2Service,
service::HttpService,
ConnectCallback, Extensions, Request, Response,
};
/// A http service builder
/// A HTTP service builder
///
/// This type can be used to construct an instance of `http service` through a
/// This type can be used to construct an instance of [`HttpService`] through a
/// builder-like pattern.
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler<T>> {
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> {
keep_alive: KeepAlive,
client_timeout: u64,
client_disconnect: u64,
@ -27,18 +24,19 @@ pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler<T>> {
local_addr: Option<net::SocketAddr>,
expect: X,
upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
_t: PhantomData<(T, S)>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
_phantom: PhantomData<S>,
}
impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler<T>>
impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Response<BoxBody>> + 'static,
S::InitError: fmt::Debug,
<S::Service as Service>::Future: 'static,
<S::Service as Service<Request>>::Future: 'static,
{
/// Create instance of `ServiceConfigBuilder`
#[allow(clippy::new_without_default)]
pub fn new() -> Self {
HttpServiceBuilder {
keep_alive: KeepAlive::Timeout(5),
@ -48,26 +46,24 @@ where
local_addr: None,
expect: ExpectHandler,
upgrade: None,
on_connect: None,
_t: PhantomData,
on_connect_ext: None,
_phantom: PhantomData,
}
}
}
impl<T, S, X, U> HttpServiceBuilder<T, S, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Response<BoxBody>> + 'static,
S::InitError: fmt::Debug,
<S::Service as Service>::Future: 'static,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
<S::Service as Service<Request>>::Future: 'static,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
<X::Service as Service>::Future: 'static,
U: ServiceFactory<Config = (), Request = (Request, Framed<T, Codec>), Response = ()>,
U: ServiceFactory<(Request, Framed<T, h1::Codec>), Config = (), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
<U::Service as Service>::Future: 'static,
{
/// Set server keep-alive setting.
///
@ -123,11 +119,10 @@ where
/// request will be forwarded to main service.
pub fn expect<F, X1>(self, expect: F) -> HttpServiceBuilder<T, S, X1, U>
where
F: IntoServiceFactory<X1>,
X1: ServiceFactory<Config = (), Request = Request, Response = Request>,
X1::Error: Into<Error>,
F: IntoServiceFactory<X1, Request>,
X1: ServiceFactory<Request, Config = (), Response = Request>,
X1::Error: Into<Response<BoxBody>>,
X1::InitError: fmt::Debug,
<X1::Service as Service>::Future: 'static,
{
HttpServiceBuilder {
keep_alive: self.keep_alive,
@ -137,8 +132,8 @@ where
local_addr: self.local_addr,
expect: expect.into_factory(),
upgrade: self.upgrade,
on_connect: self.on_connect,
_t: PhantomData,
on_connect_ext: self.on_connect_ext,
_phantom: PhantomData,
}
}
@ -148,15 +143,10 @@ where
/// and this service get called with original request and framed object.
pub fn upgrade<F, U1>(self, upgrade: F) -> HttpServiceBuilder<T, S, X, U1>
where
F: IntoServiceFactory<U1>,
U1: ServiceFactory<
Config = (),
Request = (Request, Framed<T, Codec>),
Response = (),
>,
F: IntoServiceFactory<U1, (Request, Framed<T, h1::Codec>)>,
U1: ServiceFactory<(Request, Framed<T, h1::Codec>), Config = (), Response = ()>,
U1::Error: fmt::Display,
U1::InitError: fmt::Debug,
<U1::Service as Service>::Future: 'static,
{
HttpServiceBuilder {
keep_alive: self.keep_alive,
@ -166,30 +156,30 @@ where
local_addr: self.local_addr,
expect: self.expect,
upgrade: Some(upgrade.into_factory()),
on_connect: self.on_connect,
_t: PhantomData,
on_connect_ext: self.on_connect_ext,
_phantom: PhantomData,
}
}
/// Set on-connect callback.
/// Sets the callback to be run on connection establishment.
///
/// It get called once per connection and result of the call
/// get stored to the request's extensions.
pub fn on_connect<F, I>(mut self, f: F) -> Self
/// Has mutable access to a data container that will be merged into request extensions.
/// This enables transport layer data (like client certificates) to be accessed in middleware
/// and handlers.
pub fn on_connect_ext<F>(mut self, f: F) -> Self
where
F: Fn(&T) -> I + 'static,
I: Clone + 'static,
F: Fn(&T, &mut Extensions) + 'static,
{
self.on_connect = Some(Rc::new(move |io| Box::new(Data(f(io)))));
self.on_connect_ext = Some(Rc::new(f));
self
}
/// Finish service configuration and create *http service* for HTTP/1 protocol.
/// Finish service configuration and create a HTTP Service for HTTP/1 protocol.
pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U>
where
B: MessageBody,
F: IntoServiceFactory<S>,
S::Error: Into<Error>,
F: IntoServiceFactory<S, Request>,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
{
@ -200,21 +190,22 @@ where
self.secure,
self.local_addr,
);
H1Service::with_config(cfg, service.into_factory())
.expect(self.expect)
.upgrade(self.upgrade)
.on_connect(self.on_connect)
.on_connect_ext(self.on_connect_ext)
}
/// Finish service configuration and create *http service* for HTTP/2 protocol.
/// Finish service configuration and create a HTTP service for HTTP/2 protocol.
pub fn h2<F, B>(self, service: F) -> H2Service<T, S, B>
where
B: MessageBody + 'static,
F: IntoServiceFactory<S>,
S::Error: Into<Error> + 'static,
F: IntoServiceFactory<S, Request>,
S::Error: Into<Response<BoxBody>> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
let cfg = ServiceConfig::new(
self.keep_alive,
@ -223,18 +214,19 @@ where
self.secure,
self.local_addr,
);
H2Service::with_config(cfg, service.into_factory()).on_connect(self.on_connect)
H2Service::with_config(cfg, service.into_factory()).on_connect_ext(self.on_connect_ext)
}
/// Finish service configuration and create `HttpService` instance.
pub fn finish<F, B>(self, service: F) -> HttpService<T, S, B, X, U>
where
B: MessageBody + 'static,
F: IntoServiceFactory<S>,
S::Error: Into<Error> + 'static,
F: IntoServiceFactory<S, Request>,
S::Error: Into<Response<BoxBody>> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
let cfg = ServiceConfig::new(
self.keep_alive,
@ -243,9 +235,10 @@ where
self.secure,
self.local_addr,
);
HttpService::with_config(cfg, service.into_factory())
.expect(self.expect)
.upgrade(self.upgrade)
.on_connect(self.on_connect)
.on_connect_ext(self.on_connect_ext)
}
}
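// Example sketch of driving this builder, assuming the `HttpService::build()`
// entry point, the `Response::ok()` helper, and the `.tcp()` finalizer from
// published actix-http releases; the handler itself is illustrative only.
fn example_h1_service() {
    use actix_http::{HttpService, Request, Response};

    let _factory = HttpService::build()
        // Runs once per connection; values inserted here are merged into the
        // request extensions and become visible to middleware and handlers.
        .on_connect_ext(|_io, ext| {
            ext.insert(42u32);
        })
        // Map every request to a response; errors must convert into a response.
        .finish(|_req: Request| async move {
            Ok::<_, actix_http::Error>(Response::ok().set_body("hello world"))
        })
        // Fix the transport to plain TCP streams.
        .tcp();
}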


@ -1,291 +0,0 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io, mem, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use bytes::{Buf, Bytes};
use futures_util::future::{err, Either, FutureExt, LocalBoxFuture, Ready};
use h2::client::SendRequest;
use pin_project::pin_project;
use crate::body::MessageBody;
use crate::h1::ClientCodec;
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::Payload;
use super::error::SendRequestError;
use super::pool::{Acquired, Protocol};
use super::{h1proto, h2proto};
pub(crate) enum ConnectionType<Io> {
H1(Io),
H2(SendRequest<Bytes>),
}
pub trait Connection {
type Io: AsyncRead + AsyncWrite + Unpin;
type Future: Future<Output = Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol;
/// Send request and body
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
self,
head: H,
body: B,
) -> Self::Future;
type TunnelFuture: Future<
Output = Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>;
/// Send request, returns Response and Framed
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture;
}
pub(crate) trait ConnectionLifetime: AsyncRead + AsyncWrite + 'static {
/// Close connection
fn close(self: Pin<&mut Self>);
/// Release connection to the connection pool
fn release(self: Pin<&mut Self>);
}
#[doc(hidden)]
/// HTTP client connection
pub struct IoConnection<T> {
io: Option<ConnectionType<T>>,
created: time::Instant,
pool: Option<Acquired<T>>,
}
impl<T> fmt::Debug for IoConnection<T>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.io {
Some(ConnectionType::H1(ref io)) => write!(f, "H1Connection({:?})", io),
Some(ConnectionType::H2(_)) => write!(f, "H2Connection"),
None => write!(f, "Connection(Empty)"),
}
}
}
impl<T: AsyncRead + AsyncWrite + Unpin> IoConnection<T> {
pub(crate) fn new(
io: ConnectionType<T>,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> Self {
IoConnection {
pool,
created,
io: Some(io),
}
}
pub(crate) fn into_inner(self) -> (ConnectionType<T>, time::Instant) {
(self.io.unwrap(), self.created)
}
}
impl<T> Connection for IoConnection<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Io = T;
type Future =
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol {
match self.io {
Some(ConnectionType::H1(_)) => Protocol::Http1,
Some(ConnectionType::H2(_)) => Protocol::Http2,
None => Protocol::Http1,
}
}
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
mut self,
head: H,
body: B,
) -> Self::Future {
match self.io.take().unwrap() {
ConnectionType::H1(io) => {
h1proto::send_request(io, head.into(), body, self.created, self.pool)
.boxed_local()
}
ConnectionType::H2(io) => {
h2proto::send_request(io, head.into(), body, self.created, self.pool)
.boxed_local()
}
}
}
type TunnelFuture = Either<
LocalBoxFuture<
'static,
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>,
Ready<Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>>,
>;
/// Send request, returns Response and Framed
fn open_tunnel<H: Into<RequestHeadType>>(mut self, head: H) -> Self::TunnelFuture {
match self.io.take().unwrap() {
ConnectionType::H1(io) => {
Either::Left(h1proto::open_tunnel(io, head.into()).boxed_local())
}
ConnectionType::H2(io) => {
if let Some(mut pool) = self.pool.take() {
pool.release(IoConnection::new(
ConnectionType::H2(io),
self.created,
None,
));
}
Either::Right(err(SendRequestError::TunnelNotSupported))
}
}
}
}
#[allow(dead_code)]
pub(crate) enum EitherConnection<A, B> {
A(IoConnection<A>),
B(IoConnection<B>),
}
impl<A, B> Connection for EitherConnection<A, B>
where
A: AsyncRead + AsyncWrite + Unpin + 'static,
B: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Io = EitherIo<A, B>;
type Future =
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol {
match self {
EitherConnection::A(con) => con.protocol(),
EitherConnection::B(con) => con.protocol(),
}
}
fn send_request<RB: MessageBody + 'static, H: Into<RequestHeadType>>(
self,
head: H,
body: RB,
) -> Self::Future {
match self {
EitherConnection::A(con) => con.send_request(head, body),
EitherConnection::B(con) => con.send_request(head, body),
}
}
type TunnelFuture = LocalBoxFuture<
'static,
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>;
/// Send request, returns Response and Framed
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture {
match self {
EitherConnection::A(con) => con
.open_tunnel(head)
.map(|res| {
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::A)))
})
.boxed_local(),
EitherConnection::B(con) => con
.open_tunnel(head)
.map(|res| {
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::B)))
})
.boxed_local(),
}
}
}
#[pin_project(project = EitherIoProj)]
pub enum EitherIo<A, B> {
A(#[pin] A),
B(#[pin] B),
}
impl<A, B> AsyncRead for EitherIo<A, B>
where
A: AsyncRead,
B: AsyncRead,
{
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
match self.project() {
EitherIoProj::A(val) => val.poll_read(cx, buf),
EitherIoProj::B(val) => val.poll_read(cx, buf),
}
}
unsafe fn prepare_uninitialized_buffer(
&self,
buf: &mut [mem::MaybeUninit<u8>],
) -> bool {
match self {
EitherIo::A(ref val) => val.prepare_uninitialized_buffer(buf),
EitherIo::B(ref val) => val.prepare_uninitialized_buffer(buf),
}
}
}
impl<A, B> AsyncWrite for EitherIo<A, B>
where
A: AsyncWrite,
B: AsyncWrite,
{
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
match self.project() {
EitherIoProj::A(val) => val.poll_write(cx, buf),
EitherIoProj::B(val) => val.poll_write(cx, buf),
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
match self.project() {
EitherIoProj::A(val) => val.poll_flush(cx),
EitherIoProj::B(val) => val.poll_flush(cx),
}
}
fn poll_shutdown(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
match self.project() {
EitherIoProj::A(val) => val.poll_shutdown(cx),
EitherIoProj::B(val) => val.poll_shutdown(cx),
}
}
fn poll_write_buf<U: Buf>(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut U,
) -> Poll<Result<usize, io::Error>>
where
Self: Sized,
{
match self.project() {
EitherIoProj::A(val) => val.poll_write_buf(cx, buf),
EitherIoProj::B(val) => val.poll_write_buf(cx, buf),
}
}
}


@ -1,547 +0,0 @@
use std::fmt;
use std::marker::PhantomData;
use std::time::Duration;
use actix_codec::{AsyncRead, AsyncWrite};
use actix_connect::{
default_connector, Connect as TcpConnect, Connection as TcpConnection,
};
use actix_rt::net::TcpStream;
use actix_service::{apply_fn, Service};
use actix_utils::timeout::{TimeoutError, TimeoutService};
use http::Uri;
use super::config::ConnectorConfig;
use super::connection::Connection;
use super::error::ConnectError;
use super::pool::{ConnectionPool, Protocol};
use super::Connect;
#[cfg(feature = "openssl")]
use actix_connect::ssl::openssl::SslConnector as OpensslConnector;
#[cfg(feature = "rustls")]
use actix_connect::ssl::rustls::ClientConfig;
#[cfg(feature = "rustls")]
use std::sync::Arc;
#[cfg(any(feature = "openssl", feature = "rustls"))]
enum SslConnector {
#[cfg(feature = "openssl")]
Openssl(OpensslConnector),
#[cfg(feature = "rustls")]
Rustls(Arc<ClientConfig>),
}
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
type SslConnector = ();
/// Manages HTTP client network connectivity.
///
/// The `Connector` type uses a builder-like combinator pattern for service
/// construction that finishes by calling the `.finish()` method.
///
/// ```rust,ignore
/// use std::time::Duration;
/// use actix_http::client::Connector;
///
/// let connector = Connector::new()
/// .timeout(Duration::from_secs(5))
/// .finish();
/// ```
pub struct Connector<T, U> {
connector: T,
config: ConnectorConfig,
#[allow(dead_code)]
ssl: SslConnector,
_t: PhantomData<U>,
}
trait Io: AsyncRead + AsyncWrite + Unpin {}
impl<T: AsyncRead + AsyncWrite + Unpin> Io for T {}
impl Connector<(), ()> {
#[allow(clippy::new_ret_no_self, clippy::let_unit_value)]
pub fn new() -> Connector<
impl Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, TcpStream>,
Error = actix_connect::ConnectError,
> + Clone,
TcpStream,
> {
Connector {
ssl: Self::build_ssl(vec![b"h2".to_vec(), b"http/1.1".to_vec()]),
connector: default_connector(),
config: ConnectorConfig::default(),
_t: PhantomData,
}
}
// Build Ssl connector with openssl, based on supplied alpn protocols
#[cfg(feature = "openssl")]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
use actix_connect::ssl::openssl::SslMethod;
use bytes::{BufMut, BytesMut};
let mut alpn = BytesMut::with_capacity(20);
for proto in protocols.iter() {
alpn.put_u8(proto.len() as u8);
alpn.put(proto.as_slice());
}
let mut ssl = OpensslConnector::builder(SslMethod::tls()).unwrap();
let _ = ssl
.set_alpn_protos(&alpn)
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
SslConnector::Openssl(ssl.build())
}
// Build Ssl connector with rustls, based on supplied alpn protocols
#[cfg(all(not(feature = "openssl"), feature = "rustls"))]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
let mut config = ClientConfig::new();
config.set_protocols(&protocols);
config
.root_store
.add_server_trust_anchors(&actix_tls::rustls::TLS_SERVER_ROOTS);
SslConnector::Rustls(Arc::new(config))
}
// ssl turned off, provides empty ssl connector
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
fn build_ssl(_: Vec<Vec<u8>>) -> SslConnector {}
}
impl<T, U> Connector<T, U> {
/// Use custom connector.
pub fn connector<T1, U1>(self, connector: T1) -> Connector<T1, U1>
where
U1: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
T1: Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, U1>,
Error = actix_connect::ConnectError,
> + Clone,
{
Connector {
connector,
config: self.config,
ssl: self.ssl,
_t: PhantomData,
}
}
}
impl<T, U> Connector<T, U>
where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
T: Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, U>,
Error = actix_connect::ConnectError,
> + Clone
+ 'static,
{
/// Connection timeout, i.e. max time to connect to remote host including dns name resolution.
/// Set to 1 second by default.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.config.timeout = timeout;
self
}
#[cfg(feature = "openssl")]
/// Use custom `SslConnector` instance.
pub fn ssl(mut self, connector: OpensslConnector) -> Self {
self.ssl = SslConnector::Openssl(connector);
self
}
#[cfg(feature = "rustls")]
pub fn rustls(mut self, connector: Arc<ClientConfig>) -> Self {
self.ssl = SslConnector::Rustls(connector);
self
}
/// Maximum supported HTTP major version.
///
/// Supported versions are HTTP/1.1 and HTTP/2.
pub fn max_http_version(mut self, val: http::Version) -> Self {
let versions = match val {
http::Version::HTTP_11 => vec![b"http/1.1".to_vec()],
http::Version::HTTP_2 => vec![b"h2".to_vec(), b"http/1.1".to_vec()],
_ => {
unimplemented!("actix-http:client: supported versions http/1.1, http/2")
}
};
self.ssl = Connector::build_ssl(versions);
self
}
/// Indicates the initial window size (in octets) for
/// HTTP2 stream-level flow control for received data.
///
/// The default value is 65,535 and is good for APIs, but not for big objects.
pub fn initial_window_size(mut self, size: u32) -> Self {
self.config.stream_window_size = size;
self
}
/// Indicates the initial window size (in octets) for
/// HTTP2 connection-level flow control for received data.
///
/// The default value is 65,535 and is good for APIs, but not for big objects.
pub fn initial_connection_window_size(mut self, size: u32) -> Self {
self.config.conn_window_size = size;
self
}
/// Set the total number of simultaneous connections per scheme type.
///
/// If the limit is 0, the connector has no limit.
/// The default limit is 100.
pub fn limit(mut self, limit: usize) -> Self {
self.config.limit = limit;
self
}
/// Set the keep-alive period for opened connections.
///
/// The keep-alive period is the time between uses of a connection. If
/// the delay between repeated usages of the same connection
/// exceeds this period, the connection is closed.
/// The default keep-alive period is 15 seconds.
pub fn conn_keep_alive(mut self, dur: Duration) -> Self {
self.config.conn_keep_alive = dur;
self
}
/// Set the maximum lifetime for a connection.
///
/// The connection lifetime is the maximum time any opened connection
/// may stay open, regardless of the keep-alive period, before it is closed.
/// The default lifetime is 75 seconds.
pub fn conn_lifetime(mut self, dur: Duration) -> Self {
self.config.conn_lifetime = dur;
self
}
/// Set the server connection disconnect timeout in milliseconds.
///
/// Defines a timeout for the disconnect procedure. If disconnecting does not complete
/// within this time, the socket gets dropped. This timeout affects only secure connections.
///
/// To disable the timeout, set the value to 0.
///
/// By default, the disconnect timeout is set to 3000 milliseconds.
pub fn disconnect_timeout(mut self, dur: Duration) -> Self {
self.config.disconnect_timeout = Some(dur);
self
}
/// Finish the configuration process and create the connector service.
/// The `Connector` builder always concludes by calling `finish()` last in
/// its combinator chain.
pub fn finish(
self,
) -> impl Service<Request = Connect, Response = impl Connection, Error = ConnectError>
+ Clone {
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
{
let connector = TimeoutService::new(
self.config.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
connector,
self.config.no_disconnect_timeout(),
),
}
}
#[cfg(any(feature = "openssl", feature = "rustls"))]
{
const H2: &[u8] = b"h2";
#[cfg(feature = "openssl")]
use actix_connect::ssl::openssl::OpensslConnector;
#[cfg(feature = "rustls")]
use actix_connect::ssl::rustls::{RustlsConnector, Session};
use actix_service::{boxed::service, pipeline};
let ssl_service = TimeoutService::new(
self.config.timeout,
pipeline(
apply_fn(self.connector.clone(), |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from),
)
.and_then(match self.ssl {
#[cfg(feature = "openssl")]
SslConnector::Openssl(ssl) => service(
OpensslConnector::service(ssl)
.map(|stream| {
let sock = stream.into_parts().0;
let h2 = sock
.ssl()
.selected_alpn_protocol()
.map(|protos| protos.windows(2).any(|w| w == H2))
.unwrap_or(false);
if h2 {
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
} else {
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
}
})
.map_err(ConnectError::from),
),
#[cfg(feature = "rustls")]
SslConnector::Rustls(ssl) => service(
RustlsConnector::service(ssl)
.map_err(ConnectError::from)
.map(|stream| {
let sock = stream.into_parts().0;
let h2 = sock
.get_ref()
.1
.get_alpn_protocol()
.map(|protos| protos.windows(2).any(|w| w == H2))
.unwrap_or(false);
if h2 {
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
} else {
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
}
}),
),
}),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
let tcp_service = TimeoutService::new(
self.config.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
tcp_service,
self.config.no_disconnect_timeout(),
),
ssl_pool: ConnectionPool::new(ssl_service, self.config),
}
}
}
}
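// A minimal configuration sketch (illustrative only, not part of this module; it
// assumes the `Connector::new()` constructor whose body appears near the top of this
// file, plus caller-side code that drives the resulting service):
//
//     let connector = Connector::new()
//         .timeout(Duration::from_secs(5))
//         .conn_keep_alive(Duration::from_secs(15))
//         .conn_lifetime(Duration::from_secs(75))
//         .limit(100)
//         .max_http_version(http::Version::HTTP_2)
//         .finish();
//
// As documented above, `finish()` must come last: it consumes the builder and
// returns the connector `Service` the client uses to establish connections.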
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
mod connect_impl {
use std::task::{Context, Poll};
use futures_util::future::{err, Either, Ready};
use super::*;
use crate::client::connection::IoConnection;
pub(crate) struct InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
pub(crate) tcp_pool: ConnectionPool<T, Io>,
}
impl<T, Io> Clone for InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
fn clone(&self) -> Self {
InnerConnector {
tcp_pool: self.tcp_pool.clone(),
}
}
}
impl<T, Io> Service for InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = IoConnection<Io>;
type Error = ConnectError;
type Future = Either<
<ConnectionPool<T, Io> as Service>::Future,
Ready<Result<IoConnection<Io>, ConnectError>>,
>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.tcp_pool.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
match req.uri.scheme_str() {
Some("https") | Some("wss") => {
Either::Right(err(ConnectError::SslIsNotSupported))
}
_ => Either::Left(self.tcp_pool.call(req)),
}
}
}
}
#[cfg(any(feature = "openssl", feature = "rustls"))]
mod connect_impl {
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures_core::ready;
use futures_util::future::Either;
use super::*;
use crate::client::connection::EitherConnection;
pub(crate) struct InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>,
{
pub(crate) tcp_pool: ConnectionPool<T1, Io1>,
pub(crate) ssl_pool: ConnectionPool<T2, Io2>,
}
impl<T1, T2, Io1, Io2> Clone for InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
fn clone(&self) -> Self {
InnerConnector {
tcp_pool: self.tcp_pool.clone(),
ssl_pool: self.ssl_pool.clone(),
}
}
}
impl<T1, T2, Io1, Io2> Service for InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = EitherConnection<Io1, Io2>;
type Error = ConnectError;
type Future = Either<
InnerConnectorResponseA<T1, Io1, Io2>,
InnerConnectorResponseB<T2, Io1, Io2>,
>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.tcp_pool.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
match req.uri.scheme_str() {
Some("https") | Some("wss") => Either::Right(InnerConnectorResponseB {
fut: self.ssl_pool.call(req),
_t: PhantomData,
}),
_ => Either::Left(InnerConnectorResponseA {
fut: self.tcp_pool.call(req),
_t: PhantomData,
}),
}
}
}
#[pin_project::pin_project]
pub(crate) struct InnerConnectorResponseA<T, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
{
#[pin]
fut: <ConnectionPool<T, Io1> as Service>::Future,
_t: PhantomData<Io2>,
}
impl<T, Io1, Io2> Future for InnerConnectorResponseA<T, Io1, Io2>
where
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
.map(EitherConnection::A),
)
}
}
#[pin_project::pin_project]
pub(crate) struct InnerConnectorResponseB<T, Io1, Io2>
where
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
#[pin]
fut: <ConnectionPool<T, Io2> as Service>::Future,
_t: PhantomData<Io1>,
}
impl<T, Io1, Io2> Future for InnerConnectorResponseB<T, Io1, Io2>
where
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
.map(EitherConnection::B),
)
}
}
}


@ -1,298 +0,0 @@
use std::io::Write;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{io, mem, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use bytes::buf::BufMutExt;
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use futures_util::future::poll_fn;
use futures_util::{pin_mut, SinkExt, StreamExt};
use crate::error::PayloadError;
use crate::h1;
use crate::header::HeaderMap;
use crate::http::header::{IntoHeaderValue, HOST};
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::{Payload, PayloadStream};
use super::connection::{ConnectionLifetime, ConnectionType, IoConnection};
use super::error::{ConnectError, SendRequestError};
use super::pool::Acquired;
use crate::body::{BodySize, MessageBody};
pub(crate) async fn send_request<T, B>(
io: T,
mut head: RequestHeadType,
body: B,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> Result<(ResponseHead, Payload), SendRequestError>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
B: MessageBody,
{
// set request host header
if !head.as_ref().headers.contains_key(HOST)
&& !head.extra_headers().iter().any(|h| h.contains_key(HOST))
{
if let Some(host) = head.as_ref().uri.host() {
let mut wrt = BytesMut::with_capacity(host.len() + 5).writer();
let _ = match head.as_ref().uri.port_u16() {
None | Some(80) | Some(443) => write!(wrt, "{}", host),
Some(port) => write!(wrt, "{}:{}", host, port),
};
match wrt.get_mut().split().freeze().try_into() {
Ok(value) => match head {
RequestHeadType::Owned(ref mut head) => {
head.headers.insert(HOST, value)
}
RequestHeadType::Rc(_, ref mut extra_headers) => {
let headers = extra_headers.get_or_insert(HeaderMap::new());
headers.insert(HOST, value)
}
},
Err(e) => log::error!("Can not set HOST header {}", e),
}
}
}
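// Illustration: with the default ports elided above, a request to `http://example.com/`
// produces `Host: example.com`, while `http://example.com:8080/` produces
// `Host: example.com:8080`.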
let io = H1Connection {
created,
pool,
io: Some(io),
};
// create Framed and send request
let mut framed_inner = Framed::new(io, h1::ClientCodec::default());
framed_inner.send((head, body.size()).into()).await?;
// send request body
match body.size() {
BodySize::None | BodySize::Empty | BodySize::Sized(0) => (),
_ => send_body(body, Pin::new(&mut framed_inner)).await?,
};
// read response and init read body
let res = Pin::new(&mut framed_inner).into_future().await;
let (head, framed) = if let (Some(result), framed) = res {
let item = result.map_err(SendRequestError::from)?;
(item, framed)
} else {
return Err(SendRequestError::from(ConnectError::Disconnected));
};
match framed.codec_ref().message_type() {
h1::MessageType::None => {
let force_close = !framed.codec_ref().keepalive();
release_connection(framed, force_close);
Ok((head, Payload::None))
}
_ => {
let pl: PayloadStream = PlStream::new(framed_inner).boxed_local();
Ok((head, pl.into()))
}
}
}
pub(crate) async fn open_tunnel<T>(
io: T,
head: RequestHeadType,
) -> Result<(ResponseHead, Framed<T, h1::ClientCodec>), SendRequestError>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
// create Framed and send request
let mut framed = Framed::new(io, h1::ClientCodec::default());
framed.send((head, BodySize::None).into()).await?;
// read response
if let (Some(result), framed) = framed.into_future().await {
let head = result.map_err(SendRequestError::from)?;
Ok((head, framed))
} else {
Err(SendRequestError::from(ConnectError::Disconnected))
}
}
/// Send the request body to the peer.
pub(crate) async fn send_body<T, B>(
body: B,
mut framed: Pin<&mut Framed<T, h1::ClientCodec>>,
) -> Result<(), SendRequestError>
where
T: ConnectionLifetime + Unpin,
B: MessageBody,
{
pin_mut!(body);
let mut eof = false;
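// Interleave reading body chunks with writes: fill the codec's write buffer until it
// reports full, then flush. A `Pending` flush is treated as "done for now" once the
// buffer is no longer full, so chunk buffering can resume without waiting for a
// completely drained buffer.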
while !eof {
while !eof && !framed.as_ref().is_write_buf_full() {
match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
Some(result) => {
framed.as_mut().write(h1::Message::Chunk(Some(result?)))?;
}
None => {
eof = true;
framed.as_mut().write(h1::Message::Chunk(None))?;
}
}
}
if !framed.as_ref().is_write_buf_empty() {
poll_fn(|cx| match framed.as_mut().flush(cx) {
Poll::Ready(Ok(_)) => Poll::Ready(Ok(())),
Poll::Ready(Err(err)) => Poll::Ready(Err(err)),
Poll::Pending => {
if !framed.as_ref().is_write_buf_full() {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
})
.await?;
}
}
SinkExt::flush(Pin::into_inner(framed)).await?;
Ok(())
}
#[doc(hidden)]
/// HTTP client connection
pub struct H1Connection<T> {
/// T should be `Unpin`
io: Option<T>,
created: time::Instant,
pool: Option<Acquired<T>>,
}
impl<T> ConnectionLifetime for H1Connection<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
/// Close connection
fn close(mut self: Pin<&mut Self>) {
if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() {
pool.close(IoConnection::new(
ConnectionType::H1(io),
self.created,
None,
));
}
}
}
/// Release this connection to the connection pool
fn release(mut self: Pin<&mut Self>) {
if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() {
pool.release(IoConnection::new(
ConnectionType::H1(io),
self.created,
None,
));
}
}
}
}
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncRead for H1Connection<T> {
unsafe fn prepare_uninitialized_buffer(
&self,
buf: &mut [mem::MaybeUninit<u8>],
) -> bool {
self.io.as_ref().unwrap().prepare_uninitialized_buffer(buf)
}
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io.as_mut().unwrap()).poll_read(cx, buf)
}
}
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncWrite for H1Connection<T> {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io.as_mut().unwrap()).poll_write(cx, buf)
}
fn poll_flush(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
Pin::new(self.io.as_mut().unwrap()).poll_flush(cx)
}
fn poll_shutdown(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), io::Error>> {
Pin::new(self.io.as_mut().unwrap()).poll_shutdown(cx)
}
}
#[pin_project::pin_project]
pub(crate) struct PlStream<Io> {
#[pin]
framed: Option<Framed<Io, h1::ClientPayloadCodec>>,
}
impl<Io: ConnectionLifetime> PlStream<Io> {
fn new(framed: Framed<Io, h1::ClientCodec>) -> Self {
let framed = framed.into_map_codec(|codec| codec.into_payload_codec());
PlStream {
framed: Some(framed),
}
}
}
impl<Io: ConnectionLifetime> Stream for PlStream<Io> {
type Item = Result<Bytes, PayloadError>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
let mut this = self.project();
match this.framed.as_mut().as_pin_mut().unwrap().next_item(cx)? {
Poll::Pending => Poll::Pending,
Poll::Ready(Some(chunk)) => {
if let Some(chunk) = chunk {
Poll::Ready(Some(Ok(chunk)))
} else {
let framed = this.framed.as_mut().as_pin_mut().unwrap();
let force_close = !framed.codec_ref().keepalive();
release_connection(framed, force_close);
Poll::Ready(None)
}
}
Poll::Ready(None) => Poll::Ready(None),
}
}
}
fn release_connection<T, U>(framed: Pin<&mut Framed<T, U>>, force_close: bool)
where
T: ConnectionLifetime,
{
if !force_close && framed.is_read_buf_empty() && framed.is_write_buf_empty() {
framed.io_pin().release()
} else {
framed.io_pin().close()
}
}


@ -1,21 +0,0 @@
//! HTTP client API.
use http::Uri;
mod config;
mod connection;
mod connector;
mod error;
mod h1proto;
mod h2proto;
mod pool;
pub use self::connection::Connection;
pub use self::connector::Connector;
pub use self::error::{ConnectError, FreezeRequestError, InvalidUrl, SendRequestError};
pub use self::pool::Protocol;
#[derive(Clone)]
pub struct Connect {
pub uri: Uri,
pub addr: Option<std::net::SocketAddr>,
}


@ -1,644 +0,0 @@
use std::cell::RefCell;
use std::collections::VecDeque;
use std::future::Future;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::{delay_for, Delay};
use actix_service::Service;
use actix_utils::{oneshot, task::LocalWaker};
use bytes::Bytes;
use futures_util::future::{poll_fn, FutureExt, LocalBoxFuture};
use fxhash::FxHashMap;
use h2::client::{Connection, SendRequest};
use http::uri::Authority;
use indexmap::IndexSet;
use pin_project::pin_project;
use slab::Slab;
use super::config::ConnectorConfig;
use super::connection::{ConnectionType, IoConnection};
use super::error::ConnectError;
use super::h2proto::handshake;
use super::Connect;
#[derive(Clone, Copy, PartialEq)]
/// Protocol version
pub enum Protocol {
Http1,
Http2,
}
#[derive(Hash, Eq, PartialEq, Clone, Debug)]
pub(crate) struct Key {
authority: Authority,
}
impl From<Authority> for Key {
fn from(authority: Authority) -> Key {
Key { authority }
}
}
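// Pooled connections are keyed by URI authority (host and port), so for example
// `example.com:443` and `example.com:8443` map to distinct keys and therefore to
// separate connection queues.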
/// Connection pool.
pub(crate) struct ConnectionPool<T, Io: 'static>(Rc<RefCell<T>>, Rc<RefCell<Inner<Io>>>);
impl<T, Io> ConnectionPool<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
pub(crate) fn new(connector: T, config: ConnectorConfig) -> Self {
let connector_rc = Rc::new(RefCell::new(connector));
let inner_rc = Rc::new(RefCell::new(Inner {
config,
acquired: 0,
waiters: Slab::new(),
waiters_queue: IndexSet::new(),
available: FxHashMap::default(),
waker: LocalWaker::new(),
}));
// start support future
actix_rt::spawn(ConnectorPoolSupport {
connector: Rc::clone(&connector_rc),
inner: Rc::clone(&inner_rc),
});
ConnectionPool(connector_rc, inner_rc)
}
}
impl<T, Io> Clone for ConnectionPool<T, Io>
where
Io: 'static,
{
fn clone(&self) -> Self {
ConnectionPool(self.0.clone(), self.1.clone())
}
}
impl<T, Io> Drop for ConnectionPool<T, Io> {
fn drop(&mut self) {
// wake up the ConnectorPoolSupport when dropping so it can exit properly.
self.1.borrow().waker.wake();
}
}
impl<T, Io> Service for ConnectionPool<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = IoConnection<Io>;
type Error = ConnectError;
type Future = LocalBoxFuture<'static, Result<IoConnection<Io>, ConnectError>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
let mut connector = self.0.clone();
let inner = self.1.clone();
let fut = async move {
let key = if let Some(authority) = req.uri.authority() {
authority.clone().into()
} else {
return Err(ConnectError::Unresolved);
};
// acquire connection
match poll_fn(|cx| Poll::Ready(inner.borrow_mut().acquire(&key, cx))).await {
Acquire::Acquired(io, created) => {
// use existing connection
Ok(IoConnection::new(
io,
created,
Some(Acquired(key, Some(inner))),
))
}
Acquire::Available => {
// open tcp connection
let (io, proto) = connector.call(req).await?;
let config = inner.borrow().config.clone();
let guard = OpenGuard::new(key, inner);
if proto == Protocol::Http1 {
Ok(IoConnection::new(
ConnectionType::H1(io),
Instant::now(),
Some(guard.consume()),
))
} else {
let (snd, connection) = handshake(io, &config).await?;
actix_rt::spawn(connection.map(|_| ()));
Ok(IoConnection::new(
ConnectionType::H2(snd),
Instant::now(),
Some(guard.consume()),
))
}
}
_ => {
// connection is not available, wait
let (rx, token) = inner.borrow_mut().wait_for(req);
let guard = WaiterGuard::new(key, token, inner);
let res = match rx.await {
Err(_) => Err(ConnectError::Disconnected),
Ok(res) => res,
};
guard.consume();
res
}
}
};
fut.boxed_local()
}
}
struct WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
key: Key,
token: usize,
inner: Option<Rc<RefCell<Inner<Io>>>>,
}
impl<Io> WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn new(key: Key, token: usize, inner: Rc<RefCell<Inner<Io>>>) -> Self {
Self {
key,
token,
inner: Some(inner),
}
}
fn consume(mut self) {
let _ = self.inner.take();
}
}
impl<Io> Drop for WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(&mut self) {
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release_waiter(&self.key, self.token);
inner.check_availability();
}
}
}
struct OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
key: Key,
inner: Option<Rc<RefCell<Inner<Io>>>>,
}
impl<Io> OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn new(key: Key, inner: Rc<RefCell<Inner<Io>>>) -> Self {
Self {
key,
inner: Some(inner),
}
}
fn consume(mut self) -> Acquired<Io> {
Acquired(self.key.clone(), self.inner.take())
}
}
impl<Io> Drop for OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(&mut self) {
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release();
inner.check_availability();
}
}
}
enum Acquire<T> {
Acquired(ConnectionType<T>, Instant),
Available,
NotAvailable,
}
struct AvailableConnection<Io> {
io: ConnectionType<Io>,
used: Instant,
created: Instant,
}
pub(crate) struct Inner<Io> {
config: ConnectorConfig,
acquired: usize,
available: FxHashMap<Key, VecDeque<AvailableConnection<Io>>>,
waiters: Slab<
Option<(
Connect,
oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
)>,
>,
waiters_queue: IndexSet<(Key, usize)>,
waker: LocalWaker,
}
impl<Io> Inner<Io> {
fn reserve(&mut self) {
self.acquired += 1;
}
fn release(&mut self) {
self.acquired -= 1;
}
fn release_waiter(&mut self, key: &Key, token: usize) {
self.waiters.remove(token);
let _ = self.waiters_queue.shift_remove(&(key.clone(), token));
}
}
impl<Io> Inner<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
/// Connection is not available; register a waiter and wait for one to free up.
fn wait_for(
&mut self,
connect: Connect,
) -> (
oneshot::Receiver<Result<IoConnection<Io>, ConnectError>>,
usize,
) {
let (tx, rx) = oneshot::channel();
let key: Key = connect.uri.authority().unwrap().clone().into();
let entry = self.waiters.vacant_entry();
let token = entry.key();
entry.insert(Some((connect, tx)));
assert!(self.waiters_queue.insert((key, token)));
(rx, token)
}
fn acquire(&mut self, key: &Key, cx: &mut Context<'_>) -> Acquire<Io> {
// check limits
if self.config.limit > 0 && self.acquired >= self.config.limit {
return Acquire::NotAvailable;
}
self.reserve();
// check if an open connection is available,
// cleaning up stale connections at the same time
if let Some(ref mut connections) = self.available.get_mut(key) {
let now = Instant::now();
while let Some(conn) = connections.pop_back() {
// check if it is still usable
if (now - conn.used) > self.config.conn_keep_alive
|| (now - conn.created) > self.config.conn_lifetime
{
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = conn.io {
actix_rt::spawn(CloseConnection::new(io, timeout))
}
}
} else {
let mut io = conn.io;
let mut buf = [0; 2];
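// Probe the idle socket with a non-blocking read: `Pending` means the peer has sent
// nothing and the connection can be reused; unexpected data, EOF, or an error means
// the connection is stale and must be discarded.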
if let ConnectionType::H1(ref mut s) = io {
match Pin::new(s).poll_read(cx, &mut buf) {
Poll::Pending => (),
Poll::Ready(Ok(n)) if n > 0 => {
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = io {
actix_rt::spawn(CloseConnection::new(
io, timeout,
))
}
}
continue;
}
_ => continue,
}
}
return Acquire::Acquired(io, conn.created);
}
}
}
Acquire::Available
}
fn release_conn(&mut self, key: &Key, io: ConnectionType<Io>, created: Instant) {
self.acquired -= 1;
self.available
.entry(key.clone())
.or_insert_with(VecDeque::new)
.push_back(AvailableConnection {
io,
created,
used: Instant::now(),
});
self.check_availability();
}
fn release_close(&mut self, io: ConnectionType<Io>) {
self.acquired -= 1;
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = io {
actix_rt::spawn(CloseConnection::new(io, timeout))
}
}
self.check_availability();
}
fn check_availability(&self) {
if !self.waiters_queue.is_empty() && self.acquired < self.config.limit {
self.waker.wake();
}
}
}
struct CloseConnection<T> {
io: T,
timeout: Delay,
}
impl<T> CloseConnection<T>
where
T: AsyncWrite + Unpin,
{
fn new(io: T, timeout: Duration) -> Self {
CloseConnection {
io,
timeout: delay_for(timeout),
}
}
}
impl<T> Future for CloseConnection<T>
where
T: AsyncWrite + Unpin,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
let this = self.get_mut();
match Pin::new(&mut this.timeout).poll(cx) {
Poll::Ready(_) => Poll::Ready(()),
Poll::Pending => match Pin::new(&mut this.io).poll_shutdown(cx) {
Poll::Ready(_) => Poll::Ready(()),
Poll::Pending => Poll::Pending,
},
}
}
}
#[pin_project]
struct ConnectorPoolSupport<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
connector: T,
inner: Rc<RefCell<Inner<Io>>>,
}
impl<T, Io> Future for ConnectorPoolSupport<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>,
T::Future: 'static,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if Rc::strong_count(this.inner) == 1 {
// If we are the last copy of Inner<Io>, the ConnectionPool is already gone
// and we are safe to exit.
return Poll::Ready(());
}
let mut inner = this.inner.borrow_mut();
inner.waker.register(cx.waker());
// check waiters
loop {
let (key, token) = {
if let Some((key, token)) = inner.waiters_queue.get_index(0) {
(key.clone(), *token)
} else {
break;
}
};
if inner.waiters.get(token).unwrap().is_none() {
continue;
}
match inner.acquire(&key, cx) {
Acquire::NotAvailable => break,
Acquire::Acquired(io, created) => {
let tx = inner.waiters.get_mut(token).unwrap().take().unwrap().1;
if let Err(conn) = tx.send(Ok(IoConnection::new(
io,
created,
Some(Acquired(key.clone(), Some(this.inner.clone()))),
))) {
let (io, created) = conn.unwrap().into_inner();
inner.release_conn(&key, io, created);
}
}
Acquire::Available => {
let (connect, tx) =
inner.waiters.get_mut(token).unwrap().take().unwrap();
OpenWaitingConnection::spawn(
key.clone(),
tx,
this.inner.clone(),
this.connector.call(connect),
inner.config.clone(),
);
}
}
let _ = inner.waiters_queue.swap_remove_index(0);
}
Poll::Pending
}
}
#[pin_project::pin_project(PinnedDrop)]
struct OpenWaitingConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
#[pin]
fut: F,
key: Key,
h2: Option<
LocalBoxFuture<
'static,
Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>,
>,
>,
rx: Option<oneshot::Sender<Result<IoConnection<Io>, ConnectError>>>,
inner: Option<Rc<RefCell<Inner<Io>>>>,
config: ConnectorConfig,
}
impl<F, Io> OpenWaitingConnection<F, Io>
where
F: Future<Output = Result<(Io, Protocol), ConnectError>> + 'static,
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn spawn(
key: Key,
rx: oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
inner: Rc<RefCell<Inner<Io>>>,
fut: F,
config: ConnectorConfig,
) {
actix_rt::spawn(OpenWaitingConnection {
key,
fut,
h2: None,
rx: Some(rx),
inner: Some(inner),
config,
})
}
}
#[pin_project::pinned_drop]
impl<F, Io> PinnedDrop for OpenWaitingConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(self: Pin<&mut Self>) {
if let Some(inner) = self.project().inner.take() {
let mut inner = inner.as_ref().borrow_mut();
inner.release();
inner.check_availability();
}
}
}
impl<F, Io> Future for OpenWaitingConnection<F, Io>
where
F: Future<Output = Result<(Io, Protocol), ConnectError>>,
Io: AsyncRead + AsyncWrite + Unpin,
{
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project();
if let Some(ref mut h2) = this.h2 {
return match Pin::new(h2).poll(cx) {
Poll::Ready(Ok((snd, connection))) => {
actix_rt::spawn(connection.map(|_| ()));
let rx = this.rx.take().unwrap();
let _ = rx.send(Ok(IoConnection::new(
ConnectionType::H2(snd),
Instant::now(),
Some(Acquired(this.key.clone(), this.inner.take())),
)));
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
Poll::Ready(Err(err)) => {
let _ = this.inner.take();
if let Some(rx) = this.rx.take() {
let _ = rx.send(Err(ConnectError::H2(err)));
}
Poll::Ready(())
}
};
}
match this.fut.poll(cx) {
Poll::Ready(Err(err)) => {
let _ = this.inner.take();
if let Some(rx) = this.rx.take() {
let _ = rx.send(Err(err));
}
Poll::Ready(())
}
Poll::Ready(Ok((io, proto))) => {
if proto == Protocol::Http1 {
let rx = this.rx.take().unwrap();
let _ = rx.send(Ok(IoConnection::new(
ConnectionType::H1(io),
Instant::now(),
Some(Acquired(this.key.clone(), this.inner.take())),
)));
Poll::Ready(())
} else {
*this.h2 = Some(handshake(io, this.config).boxed_local());
self.poll(cx)
}
}
Poll::Pending => Poll::Pending,
}
}
}
pub(crate) struct Acquired<T>(Key, Option<Rc<RefCell<Inner<T>>>>);
impl<T> Acquired<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
pub(crate) fn close(&mut self, conn: IoConnection<T>) {
if let Some(inner) = self.1.take() {
let (io, _) = conn.into_inner();
inner.as_ref().borrow_mut().release_close(io);
}
}
pub(crate) fn release(&mut self, conn: IoConnection<T>) {
if let Some(inner) = self.1.take() {
let (io, created) = conn.into_inner();
inner
.as_ref()
.borrow_mut()
.release_conn(&self.0, io, created);
}
}
}
impl<T> Drop for Acquired<T> {
fn drop(&mut self) {
if let Some(inner) = self.1.take() {
inner.as_ref().borrow_mut().release();
}
}
}


@ -1,40 +0,0 @@
use std::cell::RefCell;
use std::rc::Rc;
use std::task::{Context, Poll};
use actix_service::Service;
#[doc(hidden)]
/// Service that allows turning a non-`Clone` service into a service with a `Clone` impl
///
/// # Panics
/// CloneableService might panic with some creative use of thread local storage.
/// See https://github.com/actix/actix-web/issues/1295 for an example.
pub(crate) struct CloneableService<T: Service>(Rc<RefCell<T>>);
impl<T: Service> CloneableService<T> {
pub(crate) fn new(service: T) -> Self {
Self(Rc::new(RefCell::new(service)))
}
}
impl<T: Service> Clone for CloneableService<T> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl<T: Service> Service for CloneableService<T> {
type Request = T::Request;
type Response = T::Response;
type Error = T::Error;
type Future = T::Future;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.borrow_mut().poll_ready(cx)
}
fn call(&mut self, req: T::Request) -> Self::Future {
self.0.borrow_mut().call(req)
}
}
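// A minimal usage sketch (illustrative only; `MyService` is a hypothetical non-`Clone`
// service, not part of this module): wrapping it lets the same instance be shared by
// code paths that require `Clone`, at the cost of runtime borrow checks on the inner
// `RefCell`.
//
//     let svc = CloneableService::new(MyService::default());
//     let svc2 = svc.clone(); // both handles drive the same underlying service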


@ -1,24 +1,29 @@
use std::cell::Cell;
use std::fmt::Write;
use std::rc::Rc;
use std::time::Duration;
use std::{fmt, net};
use std::{
cell::Cell,
fmt::{self, Write},
net,
rc::Rc,
time::{Duration, SystemTime},
};
use actix_rt::time::{delay_for, delay_until, Delay, Instant};
use actix_rt::{
task::JoinHandle,
time::{interval, sleep_until, Instant, Sleep},
};
use bytes::BytesMut;
use futures_util::{future, FutureExt};
use time::OffsetDateTime;
// "Sun, 06 Nov 1994 08:49:37 GMT".len()
const DATE_VALUE_LENGTH: usize = 29;
/// "Sun, 06 Nov 1994 08:49:37 GMT".len()
pub(crate) const DATE_VALUE_LENGTH: usize = 29;
#[derive(Debug, PartialEq, Clone, Copy)]
/// Server keep-alive setting
pub enum KeepAlive {
/// Keep alive in seconds
Timeout(usize),
/// Rely on the OS to shut down the TCP connection
Os,
/// Disabled
Disabled,
}
@ -49,7 +54,7 @@ struct Inner {
ka_enabled: bool,
secure: bool,
local_addr: Option<std::net::SocketAddr>,
timer: DateService,
date_service: DateService,
}
impl Clone for ServiceConfig {
@ -91,42 +96,42 @@ impl ServiceConfig {
client_disconnect,
secure,
local_addr,
timer: DateService::new(),
date_service: DateService::new(),
}))
}
/// Returns true if connection is secure (HTTPS)
#[inline]
/// Returns true if connection is secure(https)
pub fn secure(&self) -> bool {
self.0.secure
}
#[inline]
/// Returns the local address that this server is bound to.
///
/// Returns `None` for connections via UDS (Unix Domain Socket).
#[inline]
pub fn local_addr(&self) -> Option<net::SocketAddr> {
self.0.local_addr
}
#[inline]
/// Keep alive duration if configured.
#[inline]
pub fn keep_alive(&self) -> Option<Duration> {
self.0.keep_alive
}
#[inline]
/// Return state of connection keep-alive functionality
#[inline]
pub fn keep_alive_enabled(&self) -> bool {
self.0.ka_enabled
}
#[inline]
/// Client timeout for first request.
pub fn client_timer(&self) -> Option<Delay> {
#[inline]
pub fn client_timer(&self) -> Option<Sleep> {
let delay_time = self.0.client_timeout;
if delay_time != 0 {
Some(delay_until(
self.0.timer.now() + Duration::from_millis(delay_time),
))
Some(sleep_until(self.now() + Duration::from_millis(delay_time)))
} else {
None
}
@ -136,7 +141,7 @@ impl ServiceConfig {
pub fn client_timer_expire(&self) -> Option<Instant> {
let delay = self.0.client_timeout;
if delay != 0 {
Some(self.0.timer.now() + Duration::from_millis(delay))
Some(self.now() + Duration::from_millis(delay))
} else {
None
}
@ -146,34 +151,26 @@ impl ServiceConfig {
pub fn client_disconnect_timer(&self) -> Option<Instant> {
let delay = self.0.client_disconnect;
if delay != 0 {
Some(self.0.timer.now() + Duration::from_millis(delay))
Some(self.now() + Duration::from_millis(delay))
} else {
None
}
}
#[inline]
/// Return keep-alive timer delay, if configured.
pub fn keep_alive_timer(&self) -> Option<Delay> {
if let Some(ka) = self.0.keep_alive {
Some(delay_until(self.0.timer.now() + ka))
} else {
None
}
#[inline]
pub fn keep_alive_timer(&self) -> Option<Sleep> {
self.keep_alive().map(|ka| sleep_until(self.now() + ka))
}
/// Keep-alive expire time
pub fn keep_alive_expire(&self) -> Option<Instant> {
if let Some(ka) = self.0.keep_alive {
Some(self.0.timer.now() + ka)
} else {
None
}
self.keep_alive().map(|ka| self.now() + ka)
}
#[inline]
pub(crate) fn now(&self) -> Instant {
self.0.timer.now()
self.0.date_service.now()
}
#[doc(hidden)]
@ -181,7 +178,7 @@ impl ServiceConfig {
let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
self.0
.timer
.date_service
.set_date(|date| buf[6..35].copy_from_slice(&date.bytes));
buf[35..].copy_from_slice(b"\r\n\r\n");
dst.extend_from_slice(&buf);
@ -189,7 +186,7 @@ impl ServiceConfig {
pub(crate) fn set_date_header(&self, dst: &mut BytesMut) {
self.0
.timer
.date_service
.set_date(|date| dst.extend_from_slice(&date.bytes));
}
}
@ -212,12 +209,7 @@ impl Date {
fn update(&mut self) {
self.pos = 0;
write!(
self,
"{}",
OffsetDateTime::now_utc().format("%a, %d %b %Y %H:%M:%S GMT")
)
.unwrap();
write!(self, "{}", httpdate::fmt_http_date(SystemTime::now())).unwrap();
}
}
@ -230,57 +222,102 @@ impl fmt::Write for Date {
}
}
#[derive(Clone)]
struct DateService(Rc<DateServiceInner>);
struct DateServiceInner {
current: Cell<Option<(Date, Instant)>>,
/// Service that updates the Date and Instant periodically, at a 500 millisecond interval.
struct DateService {
current: Rc<Cell<(Date, Instant)>>,
handle: JoinHandle<()>,
}
impl DateServiceInner {
fn new() -> Self {
DateServiceInner {
current: Cell::new(None),
}
}
fn reset(&self) {
self.current.take();
}
fn update(&self) {
let now = Instant::now();
let date = Date::new();
self.current.set(Some((date, now)));
impl Drop for DateService {
fn drop(&mut self) {
// stop the timer update async task on drop.
self.handle.abort();
}
}
impl DateService {
fn new() -> Self {
DateService(Rc::new(DateServiceInner::new()))
}
// shared date and timer for DateService and update async task.
let current = Rc::new(Cell::new((Date::new(), Instant::now())));
let current_clone = Rc::clone(&current);
// spawn an async task that sleeps for 500 ms and updates the current date/timer in a loop.
// The handle is used to stop the task when the DateService is dropped.
let handle = actix_rt::spawn(async move {
#[cfg(test)]
let _notify = notify_on_drop::NotifyOnDrop::new();
fn check_date(&self) {
if self.0.current.get().is_none() {
self.0.update();
let mut interval = interval(Duration::from_millis(500));
loop {
let now = interval.tick().await;
let date = Date::new();
current_clone.set((date, now));
}
});
// periodic date update
let s = self.clone();
actix_rt::spawn(delay_for(Duration::from_millis(500)).then(move |_| {
s.0.reset();
future::ready(())
}));
}
DateService { current, handle }
}
fn now(&self) -> Instant {
self.check_date();
self.0.current.get().unwrap().1
self.current.get().1
}
fn set_date<F: FnMut(&Date)>(&self, mut f: F) {
self.check_date();
f(&self.0.current.get().unwrap().0);
f(&self.current.get().0);
}
}
// TODO: move to a util module for testing all spawn handle drop style tasks.
/// Test module for checking the drop state of certain async tasks that are spawned
/// with `actix_rt::spawn`.
///
/// The target task must explicitly create a `NotifyOnDrop` when spawning the task.
#[cfg(test)]
mod notify_on_drop {
use std::cell::RefCell;
thread_local! {
static NOTIFY_DROPPED: RefCell<Option<bool>> = RefCell::new(None);
}
/// Check if the spawned task is dropped.
///
/// # Panics
/// Panics when there was no `NotifyOnDrop` instance on current thread.
pub(crate) fn is_dropped() -> bool {
NOTIFY_DROPPED.with(|bool| {
bool.borrow()
.expect("No NotifyOnDrop existed on current thread")
})
}
pub(crate) struct NotifyOnDrop;
impl NotifyOnDrop {
/// # Panics
///
/// Panics when constructing multiple instances on the same thread.
pub(crate) fn new() -> Self {
NOTIFY_DROPPED.with(|bool| {
let mut bool = bool.borrow_mut();
if bool.is_some() {
panic!("NotifyOnDrop existed on current thread");
} else {
*bool = Some(false);
}
});
NotifyOnDrop
}
}
impl Drop for NotifyOnDrop {
fn drop(&mut self) {
NOTIFY_DROPPED.with(|bool| {
if let Some(b) = bool.borrow_mut().as_mut() {
*b = true;
}
});
}
}
}
@ -288,14 +325,67 @@ impl DateService {
mod tests {
use super::*;
// Test modifying the date from within the closure
// passed to `set_date`
#[test]
fn test_evil_date() {
let service = DateService::new();
// Make sure that `check_date` doesn't try to spawn a task
service.0.update();
service.set_date(|_| service.0.reset());
use actix_rt::{task::yield_now, time::sleep};
#[actix_rt::test]
async fn test_date_service_update() {
let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None);
yield_now().await;
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf1);
let now1 = settings.now();
sleep_until(Instant::now() + Duration::from_secs(2)).await;
yield_now().await;
let now2 = settings.now();
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf2);
assert_ne!(now1, now2);
assert_ne!(buf1, buf2);
drop(settings);
// Ensure the task will drop eventually
let mut times = 0;
while !notify_on_drop::is_dropped() {
sleep(Duration::from_millis(100)).await;
times += 1;
assert!(times < 10, "Timeout waiting for task drop");
}
}
#[actix_rt::test]
async fn test_date_service_drop() {
let service = Rc::new(DateService::new());
// yield so the date service has a chance to register the spawned timer update task.
yield_now().await;
let clone1 = service.clone();
let clone2 = service.clone();
let clone3 = service.clone();
drop(clone1);
assert!(!notify_on_drop::is_dropped());
drop(clone2);
assert!(!notify_on_drop::is_dropped());
drop(clone3);
assert!(!notify_on_drop::is_dropped());
drop(service);
// Ensure the task will drop eventually
let mut times = 0;
while !notify_on_drop::is_dropped() {
sleep(Duration::from_millis(100)).await;
times += 1;
assert!(times < 10, "Timeout waiting for task drop");
}
}
#[test]


@ -1,25 +1,38 @@
use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::task::{Context, Poll};
//! Stream decoders.
use actix_threadpool::{run, CpuFuture};
use brotli2::write::BrotliDecoder;
use std::{
future::Future,
io::{self, Write as _},
pin::Pin,
task::{Context, Poll},
};
use actix_rt::task::{spawn_blocking, JoinHandle};
use bytes::Bytes;
use flate2::write::{GzDecoder, ZlibDecoder};
use futures_core::{ready, Stream};
use super::Writer;
use crate::error::PayloadError;
use crate::http::header::{ContentEncoding, HeaderMap, CONTENT_ENCODING};
#[cfg(feature = "compress-gzip")]
use flate2::write::{GzDecoder, ZlibDecoder};
const INPLACE: usize = 2049;
#[cfg(feature = "compress-zstd")]
use zstd::stream::write::Decoder as ZstdDecoder;
pub struct Decoder<S> {
decoder: Option<ContentDecoder>,
stream: S,
eof: bool,
fut: Option<CpuFuture<(Option<Bytes>, ContentDecoder), io::Error>>,
use crate::{
encoding::Writer,
error::{BlockingError, PayloadError},
header::{ContentEncoding, HeaderMap, CONTENT_ENCODING},
};
const MAX_CHUNK_SIZE_DECODE_IN_PLACE: usize = 2049;
pin_project_lite::pin_project! {
pub struct Decoder<S> {
decoder: Option<ContentDecoder>,
#[pin]
stream: S,
eof: bool,
fut: Option<JoinHandle<Result<(Option<Bytes>, ContentDecoder), io::Error>>>,
}
}
impl<S> Decoder<S>
@ -30,17 +43,28 @@ where
#[inline]
pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> {
let decoder = match encoding {
ContentEncoding::Br => Some(ContentDecoder::Br(Box::new(
BrotliDecoder::new(Writer::new()),
#[cfg(feature = "compress-brotli")]
ContentEncoding::Brotli => Some(ContentDecoder::Brotli(Box::new(
brotli::DecompressorWriter::new(Writer::new(), 8_096),
))),
#[cfg(feature = "compress-gzip")]
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
ZlibDecoder::new(Writer::new()),
))),
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(
GzDecoder::new(Writer::new()),
#[cfg(feature = "compress-gzip")]
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(GzDecoder::new(
Writer::new(),
)))),
#[cfg(feature = "compress-zstd")]
ContentEncoding::Zstd => Some(ContentDecoder::Zstd(Box::new(
ZstdDecoder::new(Writer::new()).expect(
"Failed to create zstd decoder. This is a bug. \
Please report it to the actix-web repository.",
),
))),
_ => None,
};
Decoder {
decoder,
stream,
@ -53,15 +77,11 @@ where
#[inline]
pub fn from_headers(stream: S, headers: &HeaderMap) -> Decoder<S> {
// check content-encoding
let encoding = if let Some(enc) = headers.get(&CONTENT_ENCODING) {
if let Ok(enc) = enc.to_str() {
ContentEncoding::from(enc)
} else {
ContentEncoding::Identity
}
} else {
ContentEncoding::Identity
};
let encoding = headers
.get(&CONTENT_ENCODING)
.and_then(|val| val.to_str().ok())
.and_then(|x| x.parse().ok())
.unwrap_or(ContentEncoding::Identity);
Self::new(stream, encoding)
}
@ -69,55 +89,59 @@ where
impl<S> Stream for Decoder<S>
where
S: Stream<Item = Result<Bytes, PayloadError>> + Unpin,
S: Stream<Item = Result<Bytes, PayloadError>>,
{
type Item = Result<Bytes, PayloadError>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.project();
loop {
if let Some(ref mut fut) = self.fut {
let (chunk, decoder) = match ready!(Pin::new(fut).poll(cx)) {
Ok(item) => item,
Err(e) => return Poll::Ready(Some(Err(e.into()))),
};
self.decoder = Some(decoder);
self.fut.take();
if let Some(ref mut fut) = this.fut {
let (chunk, decoder) =
ready!(Pin::new(fut).poll(cx)).map_err(|_| BlockingError)??;
*this.decoder = Some(decoder);
this.fut.take();
if let Some(chunk) = chunk {
return Poll::Ready(Some(Ok(chunk)));
}
}
if self.eof {
if *this.eof {
return Poll::Ready(None);
}
match Pin::new(&mut self.stream).poll_next(cx) {
Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err))),
Poll::Ready(Some(Ok(chunk))) => {
if let Some(mut decoder) = self.decoder.take() {
if chunk.len() < INPLACE {
match ready!(this.stream.as_mut().poll_next(cx)) {
Some(Err(err)) => return Poll::Ready(Some(Err(err))),
Some(Ok(chunk)) => {
if let Some(mut decoder) = this.decoder.take() {
if chunk.len() < MAX_CHUNK_SIZE_DECODE_IN_PLACE {
let chunk = decoder.feed_data(chunk)?;
self.decoder = Some(decoder);
*this.decoder = Some(decoder);
if let Some(chunk) = chunk {
return Poll::Ready(Some(Ok(chunk)));
}
} else {
self.fut = Some(run(move || {
*this.fut = Some(spawn_blocking(move || {
let chunk = decoder.feed_data(chunk)?;
Ok((chunk, decoder))
}));
}
continue;
} else {
return Poll::Ready(Some(Ok(chunk)));
}
}
Poll::Ready(None) => {
self.eof = true;
return if let Some(mut decoder) = self.decoder.take() {
None => {
*this.eof = true;
return if let Some(mut decoder) = this.decoder.take() {
match decoder.feed_eof() {
Ok(Some(res)) => Poll::Ready(Some(Ok(res))),
Ok(None) => Poll::Ready(None),
@ -127,25 +151,32 @@ where
Poll::Ready(None)
};
}
Poll::Pending => break,
}
}
Poll::Pending
}
}
enum ContentDecoder {
#[cfg(feature = "compress-gzip")]
Deflate(Box<ZlibDecoder<Writer>>),
#[cfg(feature = "compress-gzip")]
Gzip(Box<GzDecoder<Writer>>),
Br(Box<BrotliDecoder<Writer>>),
#[cfg(feature = "compress-brotli")]
Brotli(Box<brotli::DecompressorWriter<Writer>>),
// We need an explicit 'static lifetime here because ZstdDecoder needs a lifetime
// argument, and we use `spawn_blocking` in `Decoder::poll_next`, which requires `FnOnce() -> R + Send + 'static`.
#[cfg(feature = "compress-zstd")]
Zstd(Box<ZstdDecoder<'static, Writer>>),
}
impl ContentDecoder {
fn feed_eof(&mut self) -> io::Result<Option<Bytes>> {
match self {
ContentDecoder::Br(ref mut decoder) => match decoder.flush() {
#[cfg(feature = "compress-brotli")]
ContentDecoder::Brotli(ref mut decoder) => match decoder.flush() {
Ok(()) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
@ -154,7 +185,23 @@ impl ContentDecoder {
}
Err(e) => Err(e),
},
#[cfg(feature = "compress-gzip")]
ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() {
Ok(_) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
#[cfg(feature = "compress-gzip")]
ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() {
Ok(_) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
@ -165,7 +212,9 @@ impl ContentDecoder {
}
Err(e) => Err(e),
},
ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() {
#[cfg(feature = "compress-zstd")]
ContentDecoder::Zstd(ref mut decoder) => match decoder.flush() {
Ok(_) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
@ -181,10 +230,12 @@ impl ContentDecoder {
fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> {
match self {
ContentDecoder::Br(ref mut decoder) => match decoder.write_all(&data) {
#[cfg(feature = "compress-brotli")]
ContentDecoder::Brotli(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
@ -193,10 +244,13 @@ impl ContentDecoder {
}
Err(e) => Err(e),
},
#[cfg(feature = "compress-gzip")]
ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
@ -205,9 +259,27 @@ impl ContentDecoder {
}
Err(e) => Err(e),
},
#[cfg(feature = "compress-gzip")]
ContentDecoder::Deflate(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
#[cfg(feature = "compress-zstd")]
ContentDecoder::Zstd(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))


@ -1,166 +1,207 @@
//! Stream encoder
use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::task::{Context, Poll};
//! Stream encoders.
use actix_threadpool::{run, CpuFuture};
use brotli2::write::BrotliEncoder;
use std::{
error::Error as StdError,
future::Future,
io::{self, Write as _},
pin::Pin,
task::{Context, Poll},
};
use actix_rt::task::{spawn_blocking, JoinHandle};
use bytes::Bytes;
use flate2::write::{GzEncoder, ZlibEncoder};
use derive_more::Display;
use futures_core::ready;
use pin_project::pin_project;
use pin_project_lite::pin_project;
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use crate::http::header::{ContentEncoding, CONTENT_ENCODING};
use crate::http::{HeaderValue, StatusCode};
use crate::{Error, ResponseHead};
#[cfg(feature = "compress-gzip")]
use flate2::write::{GzEncoder, ZlibEncoder};
#[cfg(feature = "compress-zstd")]
use zstd::stream::write::Encoder as ZstdEncoder;
use super::Writer;
use crate::{
body::{self, BodySize, MessageBody},
error::BlockingError,
header::{self, ContentEncoding, HeaderValue, CONTENT_ENCODING},
ResponseHead, StatusCode,
};
const INPLACE: usize = 1024;
const MAX_CHUNK_SIZE_ENCODE_IN_PLACE: usize = 1024;
#[pin_project]
pub struct Encoder<B> {
eof: bool,
#[pin]
body: EncoderBody<B>,
encoder: Option<ContentEncoder>,
fut: Option<CpuFuture<ContentEncoder, io::Error>>,
pin_project! {
pub struct Encoder<B> {
#[pin]
body: EncoderBody<B>,
encoder: Option<ContentEncoder>,
fut: Option<JoinHandle<Result<ContentEncoder, io::Error>>>,
eof: bool,
}
}
impl<B: MessageBody> Encoder<B> {
pub fn response(
encoding: ContentEncoding,
head: &mut ResponseHead,
body: ResponseBody<B>,
) -> ResponseBody<Encoder<B>> {
let can_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
fn none() -> Self {
Encoder {
body: EncoderBody::None {
body: body::None::new(),
},
encoder: None,
fut: None,
eof: true,
}
}
pub fn response(encoding: ContentEncoding, head: &mut ResponseHead, body: B) -> Self {
// no need to compress an empty body
if matches!(body.size(), BodySize::None) {
return Self::none();
}
let should_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
|| head.status == StatusCode::SWITCHING_PROTOCOLS
|| head.status == StatusCode::NO_CONTENT
|| encoding == ContentEncoding::Identity
|| encoding == ContentEncoding::Auto);
|| encoding == ContentEncoding::Identity);
let body = match body {
ResponseBody::Other(b) => match b {
Body::None => return ResponseBody::Other(Body::None),
Body::Empty => return ResponseBody::Other(Body::Empty),
Body::Bytes(buf) => {
if can_encode {
EncoderBody::Bytes(buf)
} else {
return ResponseBody::Other(Body::Bytes(buf));
}
}
Body::Message(stream) => EncoderBody::BoxedStream(stream),
},
ResponseBody::Body(stream) => EncoderBody::Stream(stream),
let body = match body.try_into_bytes() {
Ok(body) => EncoderBody::Full { body },
Err(body) => EncoderBody::Stream { body },
};
if can_encode {
// Modify response body only if encoder is not None
if let Some(enc) = ContentEncoder::encoder(encoding) {
if should_encode {
// wrap body only if encoder is feature-enabled
if let Some(enc) = ContentEncoder::select(encoding) {
update_head(encoding, head);
head.no_chunking(false);
return ResponseBody::Body(Encoder {
return Encoder {
body,
eof: false,
fut: None,
encoder: Some(enc),
});
fut: None,
eof: false,
};
}
}
ResponseBody::Body(Encoder {
Encoder {
body,
eof: false,
fut: None,
encoder: None,
})
fut: None,
eof: false,
}
}
}
#[pin_project(project = EncoderBodyProj)]
enum EncoderBody<B> {
Bytes(Bytes),
Stream(#[pin] B),
BoxedStream(Box<dyn MessageBody + Unpin>),
pin_project! {
#[project = EncoderBodyProj]
enum EncoderBody<B> {
None { body: body::None },
Full { body: Bytes },
Stream { #[pin] body: B },
}
}
impl<B: MessageBody> MessageBody for EncoderBody<B> {
impl<B> MessageBody for EncoderBody<B>
where
B: MessageBody,
{
type Error = EncoderError;
#[inline]
fn size(&self) -> BodySize {
match self {
EncoderBody::Bytes(ref b) => b.size(),
EncoderBody::Stream(ref b) => b.size(),
EncoderBody::BoxedStream(ref b) => b.size(),
EncoderBody::None { body } => body.size(),
EncoderBody::Full { body } => body.size(),
EncoderBody::Stream { body } => body.size(),
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match self.project() {
EncoderBodyProj::Bytes(b) => {
if b.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(std::mem::take(b))))
}
EncoderBodyProj::None { body } => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
EncoderBodyProj::Stream(b) => b.poll_next(cx),
EncoderBodyProj::BoxedStream(ref mut b) => {
Pin::new(b.as_mut()).poll_next(cx)
EncoderBodyProj::Full { body } => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
EncoderBodyProj::Stream { body } => body
.poll_next(cx)
.map_err(|err| EncoderError::Body(err.into())),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self>
where
Self: Sized,
{
match self {
EncoderBody::None { body } => Ok(body.try_into_bytes().unwrap()),
EncoderBody::Full { body } => Ok(body.try_into_bytes().unwrap()),
_ => Err(self),
}
}
}
impl<B: MessageBody> MessageBody for Encoder<B> {
impl<B> MessageBody for Encoder<B>
where
B: MessageBody,
{
type Error = EncoderError;
#[inline]
fn size(&self) -> BodySize {
if self.encoder.is_none() {
self.body.size()
} else {
if self.encoder.is_some() {
BodySize::Stream
} else {
self.body.size()
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
) -> Poll<Option<Result<Bytes, Self::Error>>> {
let mut this = self.project();
loop {
if *this.eof {
return Poll::Ready(None);
}
if let Some(ref mut fut) = this.fut {
let mut encoder = match ready!(Pin::new(fut).poll(cx)) {
Ok(item) => item,
Err(e) => return Poll::Ready(Some(Err(e.into()))),
};
let mut encoder = ready!(Pin::new(fut).poll(cx))
.map_err(|_| EncoderError::Blocking(BlockingError))?
.map_err(EncoderError::Io)?;
let chunk = encoder.take();
*this.encoder = Some(encoder);
this.fut.take();
if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk)));
}
}
let result = this.body.as_mut().poll_next(cx);
let result = ready!(this.body.as_mut().poll_next(cx));
match result {
Poll::Ready(Some(Ok(chunk))) => {
Some(Err(err)) => return Poll::Ready(Some(Err(err))),
Some(Ok(chunk)) => {
if let Some(mut encoder) = this.encoder.take() {
if chunk.len() < INPLACE {
encoder.write(&chunk)?;
if chunk.len() < MAX_CHUNK_SIZE_ENCODE_IN_PLACE {
encoder.write(&chunk).map_err(EncoderError::Io)?;
let chunk = encoder.take();
*this.encoder = Some(encoder);
if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk)));
}
} else {
*this.fut = Some(run(move || {
*this.fut = Some(spawn_blocking(move || {
encoder.write(&chunk)?;
Ok(encoder)
}));
@ -169,9 +210,11 @@ impl<B: MessageBody> MessageBody for Encoder<B> {
return Poll::Ready(Some(Ok(chunk)));
}
}
Poll::Ready(None) => {
None => {
if let Some(encoder) = this.encoder.take() {
let chunk = encoder.finish()?;
let chunk = encoder.finish().map_err(EncoderError::Io)?;
if chunk.is_empty() {
return Poll::Ready(None);
} else {
@ -182,39 +225,78 @@ impl<B: MessageBody> MessageBody for Encoder<B> {
return Poll::Ready(None);
}
}
val => return val,
}
}
}
#[inline]
fn try_into_bytes(mut self) -> Result<Bytes, Self>
where
Self: Sized,
{
if self.encoder.is_some() {
Err(self)
} else {
match self.body.try_into_bytes() {
Ok(body) => Ok(body),
Err(body) => {
self.body = body;
Err(self)
}
}
}
}
}
fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
head.headers_mut().insert(
CONTENT_ENCODING,
HeaderValue::from_static(encoding.as_str()),
);
head.headers_mut()
.insert(header::CONTENT_ENCODING, encoding.to_header_value());
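// `Vary: accept-encoding` tells downstream caches that the stored response depends on
// the request's Accept-Encoding header, so a compressed body is not served to a
// client that cannot decode it.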
head.headers_mut()
.insert(header::VARY, HeaderValue::from_static("accept-encoding"));
head.no_chunking(false);
}
enum ContentEncoder {
#[cfg(feature = "compress-gzip")]
Deflate(ZlibEncoder<Writer>),
#[cfg(feature = "compress-gzip")]
Gzip(GzEncoder<Writer>),
Br(BrotliEncoder<Writer>),
#[cfg(feature = "compress-brotli")]
Brotli(Box<brotli::CompressorWriter<Writer>>),
// We need an explicit 'static lifetime here because ZstdEncoder needs a lifetime argument and we
// use `spawn_blocking` in `Encoder::poll_next`, which requires `FnOnce() -> R + Send + 'static`.
#[cfg(feature = "compress-zstd")]
Zstd(ZstdEncoder<'static, Writer>),
}
impl ContentEncoder {
fn encoder(encoding: ContentEncoding) -> Option<Self> {
fn select(encoding: ContentEncoding) -> Option<Self> {
match encoding {
#[cfg(feature = "compress-gzip")]
ContentEncoding::Deflate => Some(ContentEncoder::Deflate(ZlibEncoder::new(
Writer::new(),
flate2::Compression::fast(),
))),
#[cfg(feature = "compress-gzip")]
ContentEncoding::Gzip => Some(ContentEncoder::Gzip(GzEncoder::new(
Writer::new(),
flate2::Compression::fast(),
))),
ContentEncoding::Br => {
Some(ContentEncoder::Br(BrotliEncoder::new(Writer::new(), 3)))
#[cfg(feature = "compress-brotli")]
ContentEncoding::Brotli => Some(ContentEncoder::Brotli(new_brotli_compressor())),
#[cfg(feature = "compress-zstd")]
ContentEncoding::Zstd => {
let encoder = ZstdEncoder::new(Writer::new(), 3).ok()?;
Some(ContentEncoder::Zstd(encoder))
}
_ => None,
}
}
@ -222,38 +304,60 @@ impl ContentEncoder {
#[inline]
pub(crate) fn take(&mut self) -> Bytes {
match *self {
ContentEncoder::Br(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-brotli")]
ContentEncoder::Brotli(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-gzip")]
ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-zstd")]
ContentEncoder::Zstd(ref mut encoder) => encoder.get_mut().take(),
}
}
fn finish(self) -> Result<Bytes, io::Error> {
match self {
ContentEncoder::Br(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
#[cfg(feature = "compress-brotli")]
ContentEncoder::Brotli(mut encoder) => match encoder.flush() {
Ok(()) => Ok(encoder.into_inner().buf.freeze()),
Err(err) => Err(err),
},
#[cfg(feature = "compress-gzip")]
ContentEncoder::Gzip(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
#[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
#[cfg(feature = "compress-zstd")]
ContentEncoder::Zstd(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
}
}
fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
match *self {
ContentEncoder::Br(ref mut encoder) => match encoder.write_all(data) {
#[cfg(feature = "compress-brotli")]
ContentEncoder::Brotli(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error encoding br: {}", err);
Err(err)
}
},
#[cfg(feature = "compress-gzip")]
ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
@ -261,6 +365,8 @@ impl ContentEncoder {
Err(err)
}
},
#[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
@ -268,6 +374,54 @@ impl ContentEncoder {
Err(err)
}
},
#[cfg(feature = "compress-zstd")]
ContentEncoder::Zstd(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error encoding zstd: {}", err);
Err(err)
}
},
}
}
}
#[cfg(feature = "compress-brotli")]
fn new_brotli_compressor() -> Box<brotli::CompressorWriter<Writer>> {
Box::new(brotli::CompressorWriter::new(
Writer::new(),
32 * 1024, // 32 KiB buffer
3, // BROTLI_PARAM_QUALITY
22, // BROTLI_PARAM_LGWIN
))
}
#[derive(Debug, Display)]
#[non_exhaustive]
pub enum EncoderError {
#[display(fmt = "body")]
Body(Box<dyn StdError>),
#[display(fmt = "blocking")]
Blocking(BlockingError),
#[display(fmt = "io")]
Io(io::Error),
}
impl StdError for EncoderError {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
EncoderError::Body(err) => Some(&**err),
EncoderError::Blocking(err) => Some(err),
EncoderError::Io(err) => Some(err),
}
}
}
impl From<EncoderError> for crate::Error {
fn from(err: EncoderError) -> Self {
crate::Error::new_encoder().with_cause(err)
}
}
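// Illustrative sketch (not from this changeset): `EncoderError` implements `std::error::Error`
// with a `source`, so callers can walk the cause chain in the usual way. `describe` is a
// hypothetical helper name.
use std::error::Error as _;

fn describe(err: &EncoderError) -> String {
    match err.source() {
        // the Display impl comes from the derive above; the cause is the wrapped error
        Some(cause) => format!("encoder error ({}): {}", err, cause),
        None => format!("encoder error ({})", err),
    }
}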


@ -1,4 +1,5 @@
//! Content-Encoding support
//! Content-Encoding support.
use std::io;
use bytes::{Bytes, BytesMut};
@ -9,6 +10,9 @@ mod encoder;
pub use self::decoder::Decoder;
pub use self::encoder::Encoder;
/// Special-purpose writer for streaming (de-)compression.
///
/// Pre-allocates 8KiB of capacity.
pub(self) struct Writer {
buf: BytesMut,
}
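// The writer's impls are outside this hunk; for orientation, an `io::Write` implementation over
// a `BytesMut`-backed writer of this shape usually looks like the standalone sketch below (not
// from this changeset; the `new` capacity and the `take` helper are assumptions).
use std::io;

use bytes::{Bytes, BytesMut};

struct Writer {
    buf: BytesMut,
}

impl Writer {
    // assumed constructor matching the "pre-allocates 8KiB" note above
    fn new() -> Writer {
        Writer {
            buf: BytesMut::with_capacity(8 * 1024),
        }
    }

    // assumed helper: hand the accumulated bytes to the caller
    fn take(&mut self) -> Bytes {
        self.buf.split().freeze()
    }
}

impl io::Write for Writer {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        // writing into the in-memory buffer cannot fail
        self.buf.extend_from_slice(buf);
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}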

File diff suppressed because it is too large


@ -1,66 +1,127 @@
use std::any::{Any, TypeId};
use std::fmt;
use std::{
any::{Any, TypeId},
fmt,
};
use fxhash::FxHashMap;
use ahash::AHashMap;
/// A type map for request extensions.
///
/// All entries into this map must be owned types (or static references).
#[derive(Default)]
/// A type map of request extensions.
pub struct Extensions {
/// Use FxHasher with a std HashMap for faster
/// lookups on the small `TypeId` (u64 equivalent) keys.
map: FxHashMap<TypeId, Box<dyn Any>>,
/// Use AHasher with a std HashMap for faster lookups on the small `TypeId` keys.
map: AHashMap<TypeId, Box<dyn Any>>,
}
impl Extensions {
/// Create an empty `Extensions`.
/// Creates an empty `Extensions`.
#[inline]
pub fn new() -> Extensions {
Extensions {
map: FxHashMap::default(),
map: AHashMap::new(),
}
}
/// Insert a type into this `Extensions`.
/// Insert an item into the map.
///
/// If an extension of this type already existed, it will
/// be returned.
pub fn insert<T: 'static>(&mut self, val: T) {
self.map.insert(TypeId::of::<T>(), Box::new(val));
/// If an item of this type was already stored, it will be replaced and returned.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// assert_eq!(map.insert(""), None);
/// assert_eq!(map.insert(1u32), None);
/// assert_eq!(map.insert(2u32), Some(1u32));
/// assert_eq!(*map.get::<u32>().unwrap(), 2u32);
/// ```
pub fn insert<T: 'static>(&mut self, val: T) -> Option<T> {
self.map
.insert(TypeId::of::<T>(), Box::new(val))
.and_then(downcast_owned)
}
/// Check if container contains entry
/// Check if map contains an item of a given type.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// assert!(!map.contains::<u32>());
///
/// assert_eq!(map.insert(1u32), None);
/// assert!(map.contains::<u32>());
/// ```
pub fn contains<T: 'static>(&self) -> bool {
self.map.contains_key(&TypeId::of::<T>())
}
/// Get a reference to a type previously inserted on this `Extensions`.
/// Get a reference to an item of a given type.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// map.insert(1u32);
/// assert_eq!(map.get::<u32>(), Some(&1u32));
/// ```
pub fn get<T: 'static>(&self) -> Option<&T> {
self.map
.get(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast_ref())
}
/// Get a mutable reference to a type previously inserted on this `Extensions`.
/// Get a mutable reference to an item of a given type.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// map.insert(1u32);
/// assert_eq!(map.get_mut::<u32>(), Some(&mut 1u32));
/// ```
pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
self.map
.get_mut(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast_mut())
}
/// Remove a type from this `Extensions`.
/// Remove an item from the map of a given type.
///
/// If an extension of this type existed, it will be returned.
/// If an item of this type was already stored, it will be returned.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
///
/// map.insert(1u32);
/// assert_eq!(map.get::<u32>(), Some(&1u32));
///
/// assert_eq!(map.remove::<u32>(), Some(1u32));
/// assert!(!map.contains::<u32>());
/// ```
pub fn remove<T: 'static>(&mut self) -> Option<T> {
self.map
.remove(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast().ok().map(|boxed| *boxed))
self.map.remove(&TypeId::of::<T>()).and_then(downcast_owned)
}
/// Clear the `Extensions` of all inserted extensions.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
///
/// map.insert(1u32);
/// assert!(map.contains::<u32>());
///
/// map.clear();
/// assert!(!map.contains::<u32>());
/// ```
#[inline]
pub fn clear(&mut self) {
self.map.clear();
}
/// Extends self with the items from another `Extensions`.
pub fn extend(&mut self, other: Extensions) {
self.map.extend(other.map);
}
}
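// Illustrative usage sketch for `extend` (not from this changeset), in the same spirit as the
// doc-tests above:
use actix_http::Extensions;

fn main() {
    let mut base = Extensions::new();
    base.insert(1u32);
    base.insert("base");

    let mut other = Extensions::new();
    other.insert(2u32); // same type as in `base`, so it replaces the old value on extend

    base.extend(other);
    assert_eq!(base.get::<u32>(), Some(&2u32));
    assert_eq!(base.get::<&str>(), Some(&"base"));
}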
impl fmt::Debug for Extensions {
@ -69,6 +130,10 @@ impl fmt::Debug for Extensions {
}
}
fn downcast_owned<T: 'static>(boxed: Box<dyn Any>) -> Option<T> {
boxed.downcast().ok().map(|boxed| *boxed)
}
#[cfg(test)]
mod tests {
use super::*;
@ -108,6 +173,8 @@ mod tests {
#[test]
fn test_integers() {
static A: u32 = 8;
let mut map = Extensions::new();
map.insert::<i8>(8);
@ -120,6 +187,7 @@ mod tests {
map.insert::<u32>(32);
map.insert::<u64>(64);
map.insert::<u128>(128);
map.insert::<&'static u32>(&A);
assert!(map.get::<i8>().is_some());
assert!(map.get::<i16>().is_some());
assert!(map.get::<i32>().is_some());
@ -130,6 +198,7 @@ mod tests {
assert!(map.get::<u32>().is_some());
assert!(map.get::<u64>().is_some());
assert!(map.get::<u128>().is_some());
assert!(map.get::<&'static u32>().is_some());
}
#[test]
@ -178,4 +247,34 @@ mod tests {
assert_eq!(extensions.get::<bool>(), None);
assert_eq!(extensions.get(), Some(&MyType(10)));
}
#[test]
fn test_extend() {
#[derive(Debug, PartialEq)]
struct MyType(i32);
let mut extensions = Extensions::new();
extensions.insert(5i32);
extensions.insert(MyType(10));
let mut other = Extensions::new();
other.insert(15i32);
other.insert(20u8);
extensions.extend(other);
assert_eq!(extensions.get(), Some(&15i32));
assert_eq!(extensions.get_mut(), Some(&mut 15i32));
assert_eq!(extensions.remove::<i32>(), Some(15i32));
assert!(extensions.get::<i32>().is_none());
assert_eq!(extensions.get::<bool>(), None);
assert_eq!(extensions.get(), Some(&MyType(10)));
assert_eq!(extensions.get(), Some(&20u8));
assert_eq!(extensions.get_mut(), Some(&mut 20u8));
}
}


@ -0,0 +1,426 @@
use std::{io, task::Poll};
use bytes::{Buf as _, Bytes, BytesMut};
macro_rules! byte (
($rdr:ident) => ({
if $rdr.len() > 0 {
let b = $rdr[0];
$rdr.advance(1);
b
} else {
return Poll::Pending
}
})
);
#[derive(Debug, PartialEq, Clone)]
pub(super) enum ChunkedState {
Size,
SizeLws,
Extension,
SizeLf,
Body,
BodyCr,
BodyLf,
EndCr,
EndLf,
End,
}
impl ChunkedState {
pub(super) fn step(
&self,
body: &mut BytesMut,
size: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
use self::ChunkedState::*;
match *self {
Size => ChunkedState::read_size(body, size),
SizeLws => ChunkedState::read_size_lws(body),
Extension => ChunkedState::read_extension(body),
SizeLf => ChunkedState::read_size_lf(body, *size),
Body => ChunkedState::read_body(body, size, buf),
BodyCr => ChunkedState::read_body_cr(body),
BodyLf => ChunkedState::read_body_lf(body),
EndCr => ChunkedState::read_end_cr(body),
EndLf => ChunkedState::read_end_lf(body),
End => Poll::Ready(Ok(ChunkedState::End)),
}
}
fn read_size(rdr: &mut BytesMut, size: &mut u64) -> Poll<Result<ChunkedState, io::Error>> {
let radix = 16;
let rem = match byte!(rdr) {
b @ b'0'..=b'9' => b - b'0',
b @ b'a'..=b'f' => b + 10 - b'a',
b @ b'A'..=b'F' => b + 10 - b'A',
b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => return Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Invalid Size",
)));
}
};
match size.checked_mul(radix) {
Some(n) => {
*size = n as u64;
*size += rem as u64;
Poll::Ready(Ok(ChunkedState::Size))
}
None => {
log::debug!("chunk size would overflow u64");
Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Size is too big",
)))
}
}
}
fn read_size_lws(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
// LWS can follow the chunk size, but no more digits can come
b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size linear white space",
))),
}
}
fn read_extension(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
// strictly 0x20 (space) should be disallowed but we don't parse quoted strings here
0x00..=0x08 | 0x0a..=0x1f | 0x7f => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid character in chunk extension",
))),
_ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions
}
}
fn read_size_lf(rdr: &mut BytesMut, size: u64) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' if size > 0 => Poll::Ready(Ok(ChunkedState::Body)),
b'\n' if size == 0 => Poll::Ready(Ok(ChunkedState::EndCr)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size LF",
))),
}
}
fn read_body(
rdr: &mut BytesMut,
rem: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
log::trace!("Chunked read, remaining={:?}", rem);
let len = rdr.len() as u64;
if len == 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
let slice;
if *rem > len {
slice = rdr.split().freeze();
*rem -= len;
} else {
slice = rdr.split_to(*rem as usize).freeze();
*rem = 0;
}
*buf = Some(slice);
if *rem > 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
Poll::Ready(Ok(ChunkedState::BodyCr))
}
}
}
fn read_body_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body CR",
))),
}
}
fn read_body_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::Size)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body LF",
))),
}
}
fn read_end_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end CR",
))),
}
}
fn read_end_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::End)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end LF",
))),
}
}
}
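// Illustrative sketch (not from this changeset): the same overflow-checked hex accumulation as
// `read_size`, applied to a plain byte slice. `parse_chunk_size` is a hypothetical helper.
fn parse_chunk_size(line: &[u8]) -> Option<u64> {
    let mut size: u64 = 0;

    for &b in line {
        let digit = match b {
            b'0'..=b'9' => b - b'0',
            b'a'..=b'f' => b + 10 - b'a',
            b'A'..=b'F' => b + 10 - b'A',
            // the size ends at LWS, a chunk extension, or CR
            b'\t' | b' ' | b';' | b'\r' => break,
            _ => return None,
        };

        // checked_mul/checked_add guard against a chunk size that would overflow u64
        size = size.checked_mul(16)?.checked_add(u64::from(digit))?;
    }

    Some(size)
}

fn main() {
    assert_eq!(parse_chunk_size(b"4\r\n"), Some(4));
    assert_eq!(parse_chunk_size(b"1a;ext=1\r\n"), Some(26));
    assert_eq!(parse_chunk_size(b"f0000000000000003\r\n"), None); // overflows u64
}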
#[cfg(test)]
mod tests {
use actix_codec::Decoder as _;
use bytes::{Bytes, BytesMut};
use http::Method;
use crate::{
error::ParseError,
h1::decoder::{MessageDecoder, PayloadItem},
HttpMessage as _, Request,
};
macro_rules! parse_ready {
($e:expr) => {{
match MessageDecoder::<Request>::default().decode($e) {
Ok(Some((msg, _))) => msg,
Ok(_) => unreachable!("Eof during parsing http request"),
Err(err) => unreachable!("Error during parsing http request: {:?}", err),
}
}};
}
macro_rules! expect_parse_err {
($e:expr) => {{
match MessageDecoder::<Request>::default().decode($e) {
Err(err) => match err {
ParseError::Io(_) => unreachable!("Parse error expected"),
_ => {}
},
_ => unreachable!("Error expected"),
}
}};
}
#[test]
fn test_parse_chunked_payload_chunk_extension() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\
\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(msg.chunked().unwrap());
buf.extend(b"4;test\r\ndata\r\n4\r\nline\r\n0\r\n\r\n"); // test: test\r\n\r\n")
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"data"));
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"line"));
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
}
#[test]
fn test_request_chunked() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let req = parse_ready!(&mut buf);
if let Ok(val) = req.chunked() {
assert!(val);
} else {
unreachable!("Error");
}
// intentional typo in "chunked"
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chnked\r\n\r\n",
);
expect_parse_err!(&mut buf);
}
#[test]
fn test_http_request_chunked_payload() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n");
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"data"
);
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"line"
);
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn test_http_request_chunked_payload_and_next_message() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(
b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n\
POST /test2 HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n"
.iter(),
);
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"line");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert!(req.chunked().unwrap());
assert_eq!(*req.method(), Method::POST);
assert!(req.chunked().unwrap());
}
#[test]
fn test_http_request_chunked_payload_chunks() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\n1111\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"1111");
buf.extend(b"4\r\ndata\r");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
buf.extend(b"\n4");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\n");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"li");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"li");
//trailers
//buf.feed_data("test: test\r\n");
//not_ready!(reader.parse(&mut buf, &mut readbuf));
buf.extend(b"ne\r\n0\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"ne");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r\n");
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn chunk_extension_quoted() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
Host: localhost:8080\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
2;hello=b;one=\"1 2 3\"\r\n\
xx",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let chunk = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"xx")));
}
#[test]
fn hrs_chunk_extension_invalid() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: localhost:8080\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
2;x\nx\r\n\
4c\r\n\
0\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let err = pl.decode(&mut buf).unwrap_err();
assert!(err
.to_string()
.contains("Invalid character in chunk extension"));
}
#[test]
fn hrs_chunk_size_overflow() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
f0000000000000003\r\n\
abc\r\n\
0\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let err = pl.decode(&mut buf).unwrap_err();
assert!(err
.to_string()
.contains("Invalid chunk size line: Size is too big"));
}
}


@ -5,13 +5,15 @@ use bitflags::bitflags;
use bytes::{Bytes, BytesMut};
use http::{Method, Version};
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
use super::{decoder, encoder, reserve_readbuf};
use super::{Message, MessageType};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::error::{ParseError, PayloadError};
use crate::message::{ConnectionType, RequestHeadType, ResponseHead};
use super::{
decoder::{self, PayloadDecoder, PayloadItem, PayloadType},
encoder, reserve_readbuf, Message, MessageType,
};
use crate::{
body::BodySize,
error::{ParseError, PayloadError},
ConnectionType, RequestHeadType, ResponseHead, ServiceConfig,
};
bitflags! {
struct Flags: u8 {
@ -120,7 +122,7 @@ impl Decoder for ClientCodec {
debug_assert!(!self.inner.payload.is_some(), "Payload decoder is set");
if let Some((req, payload)) = self.inner.decoder.decode(src)? {
if let Some(ctype) = req.ctype() {
if let Some(ctype) = req.conn_type() {
// do not use peer's keep-alive
self.inner.ctype = if ctype == ConnectionType::KeepAlive {
self.inner.ctype
@ -223,15 +225,3 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
Ok(())
}
}
pub struct Writer<'a>(pub &'a mut BytesMut);
impl<'a> io::Write for Writer<'a> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}


@ -5,15 +5,13 @@ use bitflags::bitflags;
use bytes::BytesMut;
use http::{Method, Version};
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
use super::{decoder, encoder};
use super::{Message, MessageType};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::error::ParseError;
use crate::message::ConnectionType;
use crate::request::Request;
use crate::response::Response;
use super::{
decoder::{self, PayloadDecoder, PayloadItem, PayloadType},
encoder, Message, MessageType,
};
use crate::{
body::BodySize, error::ParseError, ConnectionType, Request, Response, ServiceConfig,
};
bitflags! {
struct Flags: u8 {
@ -29,7 +27,7 @@ pub struct Codec {
decoder: decoder::MessageDecoder<Request>,
payload: Option<PayloadDecoder>,
version: Version,
ctype: ConnectionType,
conn_type: ConnectionType,
// encoder part
flags: Flags,
@ -58,37 +56,38 @@ impl Codec {
} else {
Flags::empty()
};
Codec {
config,
flags,
decoder: decoder::MessageDecoder::default(),
payload: None,
version: Version::HTTP_11,
ctype: ConnectionType::Close,
conn_type: ConnectionType::Close,
encoder: encoder::MessageEncoder::default(),
}
}
/// Check if request is upgrade.
#[inline]
/// Check if request is upgrade
pub fn upgrade(&self) -> bool {
self.ctype == ConnectionType::Upgrade
self.conn_type == ConnectionType::Upgrade
}
/// Check if last response is keep-alive.
#[inline]
/// Check if last response is keep-alive
pub fn keepalive(&self) -> bool {
self.ctype == ConnectionType::KeepAlive
self.conn_type == ConnectionType::KeepAlive
}
/// Check if keep-alive enabled on server level.
#[inline]
/// Check if keep-alive enabled on server level
pub fn keepalive_enabled(&self) -> bool {
self.flags.contains(Flags::KEEPALIVE_ENABLED)
}
/// Check last request's message type.
#[inline]
/// Check last request's message type
pub fn message_type(&self) -> MessageType {
if self.flags.contains(Flags::STREAM) {
MessageType::Stream
@ -110,8 +109,8 @@ impl Decoder for Codec {
type Error = ParseError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if self.payload.is_some() {
Ok(match self.payload.as_mut().unwrap().decode(src)? {
if let Some(ref mut payload) = self.payload {
Ok(match payload.decode(src)? {
Some(PayloadItem::Chunk(chunk)) => Some(Message::Chunk(Some(chunk))),
Some(PayloadItem::Eof) => {
self.payload.take();
@ -123,11 +122,11 @@ impl Decoder for Codec {
let head = req.head();
self.flags.set(Flags::HEAD, head.method == Method::HEAD);
self.version = head.version;
self.ctype = head.connection_type();
if self.ctype == ConnectionType::KeepAlive
self.conn_type = head.connection_type();
if self.conn_type == ConnectionType::KeepAlive
&& !self.flags.contains(Flags::KEEPALIVE_ENABLED)
{
self.ctype = ConnectionType::Close
self.conn_type = ConnectionType::Close
}
match payload {
PayloadType::None => self.payload = None,
@ -158,14 +157,14 @@ impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
res.head_mut().version = self.version;
// connection status
self.ctype = if let Some(ct) = res.head().ctype() {
self.conn_type = if let Some(ct) = res.head().conn_type() {
if ct == ConnectionType::KeepAlive {
self.ctype
self.conn_type
} else {
ct
}
} else {
self.ctype
self.conn_type
};
// encode message
@ -176,10 +175,9 @@ impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
self.flags.contains(Flags::STREAM),
self.version,
length,
self.ctype,
self.conn_type,
&self.config,
)?;
// self.headers_size = (dst.len() - len) as u32;
}
Message::Chunk(Some(bytes)) => {
self.encoder.encode_chunk(bytes.as_ref(), dst)?;
@ -188,6 +186,7 @@ impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
self.encoder.encode_eof(dst)?;
}
}
Ok(())
}
}
@ -198,10 +197,10 @@ mod tests {
use http::Method;
use super::*;
use crate::httpmessage::HttpMessage;
use crate::HttpMessage as _;
#[test]
fn test_http_request_chunked_payload_and_next_message() {
#[actix_rt::test]
async fn test_http_request_chunked_payload_and_next_message() {
let mut codec = Codec::default();
let mut buf = BytesMut::from(


@ -1,20 +1,17 @@
use std::convert::TryFrom;
use std::io;
use std::marker::PhantomData;
use std::task::Poll;
use std::{convert::TryFrom, io, marker::PhantomData, mem::MaybeUninit, task::Poll};
use actix_codec::Decoder;
use bytes::{Buf, Bytes, BytesMut};
use http::header::{HeaderName, HeaderValue};
use http::{header, Method, StatusCode, Uri, Version};
use bytes::{Bytes, BytesMut};
use http::{
header::{self, HeaderName, HeaderValue},
Method, StatusCode, Uri, Version,
};
use log::{debug, error, trace};
use crate::error::ParseError;
use crate::header::HeaderMap;
use crate::message::{ConnectionType, ResponseHead};
use crate::request::Request;
use super::chunked::ChunkedState;
use crate::{error::ParseError, header::HeaderMap, ConnectionType, Request, ResponseHead};
const MAX_BUFFER_SIZE: usize = 131_072;
pub(crate) const MAX_BUFFER_SIZE: usize = 131_072;
const MAX_HEADERS: usize = 96;
/// Incoming message decoder
@ -50,7 +47,7 @@ pub(crate) enum PayloadLength {
}
pub(crate) trait MessageType: Sized {
fn set_connection_type(&mut self, ctype: Option<ConnectionType>);
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>);
fn set_expect(&mut self);
@ -67,14 +64,14 @@ pub(crate) trait MessageType: Sized {
let mut has_upgrade_websocket = false;
let mut expect = false;
let mut chunked = false;
let mut seen_te = false;
let mut content_length = None;
{
let headers = self.headers_mut();
for idx in raw_headers.iter() {
let name =
HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap();
let name = HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap();
// SAFETY: httparse already checks header value is only visible ASCII bytes
// from_maybe_shared_unchecked contains debug assertions so they are omitted here
@ -85,8 +82,17 @@ pub(crate) trait MessageType: Sized {
};
match name {
header::CONTENT_LENGTH => {
if let Ok(s) = value.to_str() {
header::CONTENT_LENGTH if content_length.is_some() => {
debug!("multiple Content-Length");
return Err(ParseError::Header);
}
header::CONTENT_LENGTH => match value.to_str() {
Ok(s) if s.trim().starts_with('+') => {
debug!("illegal Content-Length: {:?}", s);
return Err(ParseError::Header);
}
Ok(s) => {
if let Ok(len) = s.parse::<u64>() {
if len != 0 {
content_length = Some(len);
@ -95,22 +101,38 @@ pub(crate) trait MessageType: Sized {
debug!("illegal Content-Length: {:?}", s);
return Err(ParseError::Header);
}
} else {
}
Err(_) => {
debug!("illegal Content-Length: {:?}", value);
return Err(ParseError::Header);
}
}
},
// transfer-encoding
header::TRANSFER_ENCODING if seen_te => {
debug!("multiple Transfer-Encoding not allowed");
return Err(ParseError::Header);
}
header::TRANSFER_ENCODING => {
if let Ok(s) = value.to_str().map(|s| s.trim()) {
chunked = s.eq_ignore_ascii_case("chunked");
seen_te = true;
if let Ok(s) = value.to_str().map(str::trim) {
if s.eq_ignore_ascii_case("chunked") {
chunked = true;
} else if s.eq_ignore_ascii_case("identity") {
// allow silently since multiple TE headers are already checked
} else {
debug!("illegal Transfer-Encoding: {:?}", s);
return Err(ParseError::Header);
}
} else {
return Err(ParseError::Header);
}
}
// connection keep-alive state
header::CONNECTION => {
ka = if let Ok(conn) = value.to_str().map(|conn| conn.trim()) {
ka = if let Ok(conn) = value.to_str().map(str::trim) {
if conn.eq_ignore_ascii_case("keep-alive") {
Some(ConnectionType::KeepAlive)
} else if conn.eq_ignore_ascii_case("close") {
@ -125,7 +147,7 @@ pub(crate) trait MessageType: Sized {
};
}
header::UPGRADE => {
if let Ok(val) = value.to_str().map(|val| val.trim()) {
if let Ok(val) = value.to_str().map(str::trim) {
if val.eq_ignore_ascii_case("websocket") {
has_upgrade_websocket = true;
}
@ -137,7 +159,7 @@ pub(crate) trait MessageType: Sized {
expect = true;
}
}
_ => (),
_ => {}
}
headers.append(name, value);
@ -148,7 +170,7 @@ pub(crate) trait MessageType: Sized {
self.set_expect()
}
// https://tools.ietf.org/html/rfc7230#section-3.3.3
// https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.3
if chunked {
// Chunked encoding
Ok(PayloadLength::Payload(PayloadType::Payload(
@ -168,8 +190,8 @@ pub(crate) trait MessageType: Sized {
}
impl MessageType for Request {
fn set_connection_type(&mut self, ctype: Option<ConnectionType>) {
if let Some(ctype) = ctype {
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>) {
if let Some(ctype) = conn_type {
self.head_mut().set_connection_type(ctype);
}
}
@ -186,10 +208,17 @@ impl MessageType for Request {
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
let (len, method, uri, ver, h_len) = {
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
// SAFETY:
// Create an uninitialized array of `MaybeUninit`. The `assume_init` is
// safe because the type we are claiming to have initialized here is a
// bunch of `MaybeUninit`s, which do not require initialization.
let mut parsed = unsafe {
MaybeUninit::<[MaybeUninit<httparse::Header<'_>>; MAX_HEADERS]>::uninit()
.assume_init()
};
let mut req = httparse::Request::new(&mut parsed);
match req.parse(src)? {
let mut req = httparse::Request::new(&mut []);
match req.parse_with_uninit_headers(src, &mut parsed)? {
httparse::Status::Complete(len) => {
let method = Method::from_bytes(req.method.unwrap().as_bytes())
.map_err(|_| ParseError::Method)?;
@ -203,7 +232,15 @@ impl MessageType for Request {
(len, method, uri, version, req.headers.len())
}
httparse::Status::Partial => return Ok(None),
httparse::Status::Partial => {
return if src.len() >= MAX_BUFFER_SIZE {
trace!("MAX_BUFFER_SIZE unprocessed data reached, closing");
Err(ParseError::TooLarge)
} else {
// Return None to signal that more reads are needed to parse the request
Ok(None)
};
}
}
};
@ -216,15 +253,12 @@ impl MessageType for Request {
let decoder = match length {
PayloadLength::Payload(pl) => pl,
PayloadLength::UpgradeWebSocket => {
// upgrade(websocket)
// upgrade (WebSocket)
PayloadType::Stream(PayloadDecoder::eof())
}
PayloadLength::None => {
if method == Method::CONNECT {
PayloadType::Stream(PayloadDecoder::eof())
} else if src.len() >= MAX_BUFFER_SIZE {
trace!("MAX_BUFFER_SIZE unprocessed data reached, closing");
return Err(ParseError::TooLarge);
} else {
PayloadType::None
}
@ -241,8 +275,8 @@ impl MessageType for Request {
}
impl MessageType for ResponseHead {
fn set_connection_type(&mut self, ctype: Option<ConnectionType>) {
if let Some(ctype) = ctype {
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>) {
if let Some(ctype) = conn_type {
ResponseHead::set_connection_type(self, ctype);
}
}
@ -273,7 +307,14 @@ impl MessageType for ResponseHead {
(len, version, status, res.headers.len())
}
httparse::Status::Partial => return Ok(None),
httparse::Status::Partial => {
return if src.len() >= MAX_BUFFER_SIZE {
error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
Err(ParseError::TooLarge)
} else {
Ok(None)
}
}
}
};
@ -289,9 +330,6 @@ impl MessageType for ResponseHead {
} else if status == StatusCode::SWITCHING_PROTOCOLS {
// switching protocol or connect
PayloadType::Stream(PayloadDecoder::eof())
} else if src.len() >= MAX_BUFFER_SIZE {
error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
return Err(ParseError::TooLarge);
} else {
// for HTTP/1.0 read to eof and close connection
if msg.version == Version::HTTP_10 {
@ -342,34 +380,36 @@ impl HeaderIndex {
}
#[derive(Debug, Clone, PartialEq)]
/// Http payload item
/// Chunk type yielded while decoding a payload.
pub enum PayloadItem {
Chunk(Bytes),
Eof,
}
/// Decoders to handle different Transfer-Encodings.
/// Decoder that can handle different payload types.
///
/// If a message body does not include a Transfer-Encoding, it *should*
/// include a Content-Length header.
/// If a message body does not use `Transfer-Encoding`, it should include a `Content-Length`.
#[derive(Debug, Clone, PartialEq)]
pub struct PayloadDecoder {
kind: Kind,
}
impl PayloadDecoder {
/// Constructs a fixed-length payload decoder.
pub fn length(x: u64) -> PayloadDecoder {
PayloadDecoder {
kind: Kind::Length(x),
}
}
/// Constructs a chunked encoding decoder.
pub fn chunked() -> PayloadDecoder {
PayloadDecoder {
kind: Kind::Chunked(ChunkedState::Size, 0),
}
}
/// Creates a decoder that yields chunks until the stream returns EOF.
pub fn eof() -> PayloadDecoder {
PayloadDecoder { kind: Kind::Eof }
}
@ -377,40 +417,27 @@ impl PayloadDecoder {
#[derive(Debug, Clone, PartialEq)]
enum Kind {
/// A Reader used when a Content-Length header is passed with a positive
/// integer.
/// A reader used when a `Content-Length` header is passed with a positive integer.
Length(u64),
/// A Reader used when Transfer-Encoding is `chunked`.
Chunked(ChunkedState, u64),
/// A Reader used for responses that don't indicate a length or chunked.
///
/// Note: This should only be used for `Response`s. It is illegal for a
/// `Request` to be made with both `Content-Length` and
/// `Transfer-Encoding: chunked` missing, as explained in the spec:
///
/// > If a Transfer-Encoding header field is present in a response and
/// > the chunked transfer coding is not the final encoding, the
/// > message body length is determined by reading the connection until
/// > it is closed by the server. If a Transfer-Encoding header field
/// > is present in a request and the chunked transfer coding is not
/// > the final encoding, the message body length cannot be determined
/// > reliably; the server MUST respond with the 400 (Bad Request)
/// > status code and then close the connection.
Eof,
}
#[derive(Debug, PartialEq, Clone)]
enum ChunkedState {
Size,
SizeLws,
Extension,
SizeLf,
Body,
BodyCr,
BodyLf,
EndCr,
EndLf,
End,
/// A reader used when `Transfer-Encoding` is `chunked`.
Chunked(ChunkedState, u64),
/// A reader used for responses that don't indicate a length or chunked.
///
/// Note: This should only be used for `Response`s. It is illegal for a `Request` to be made
/// with both `Content-Length` and `Transfer-Encoding: chunked` missing, as explained
/// in [RFC 7230 §3.3.3]:
///
/// > If a Transfer-Encoding header field is present in a response and the chunked transfer
/// > coding is not the final encoding, the message body length is determined by reading the
/// > connection until it is closed by the server. If a Transfer-Encoding header field is
/// > present in a request and the chunked transfer coding is not the final encoding, the
/// > message body length cannot be determined reliably; the server MUST respond with the 400
/// > (Bad Request) status code and then close the connection.
///
/// [RFC 7230 §3.3.3]: https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.3
Eof,
}
impl Decoder for PayloadDecoder {
@ -439,27 +466,33 @@ impl Decoder for PayloadDecoder {
Ok(Some(PayloadItem::Chunk(buf)))
}
}
Kind::Chunked(ref mut state, ref mut size) => {
loop {
let mut buf = None;
// advances the chunked state
*state = match state.step(src, size, &mut buf) {
Poll::Pending => return Ok(None),
Poll::Ready(Ok(state)) => state,
Poll::Ready(Err(e)) => return Err(e),
};
if *state == ChunkedState::End {
trace!("End of chunked stream");
return Ok(Some(PayloadItem::Eof));
}
if let Some(buf) = buf {
return Ok(Some(PayloadItem::Chunk(buf)));
}
if src.is_empty() {
return Ok(None);
}
}
}
Kind::Eof => {
if src.is_empty() {
Ok(None)
@ -471,201 +504,40 @@ impl Decoder for PayloadDecoder {
}
}
macro_rules! byte (
($rdr:ident) => ({
if $rdr.len() > 0 {
let b = $rdr[0];
$rdr.advance(1);
b
} else {
return Poll::Pending
}
})
);
impl ChunkedState {
fn step(
&self,
body: &mut BytesMut,
size: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
use self::ChunkedState::*;
match *self {
Size => ChunkedState::read_size(body, size),
SizeLws => ChunkedState::read_size_lws(body),
Extension => ChunkedState::read_extension(body),
SizeLf => ChunkedState::read_size_lf(body, size),
Body => ChunkedState::read_body(body, size, buf),
BodyCr => ChunkedState::read_body_cr(body),
BodyLf => ChunkedState::read_body_lf(body),
EndCr => ChunkedState::read_end_cr(body),
EndLf => ChunkedState::read_end_lf(body),
End => Poll::Ready(Ok(ChunkedState::End)),
}
}
fn read_size(
rdr: &mut BytesMut,
size: &mut u64,
) -> Poll<Result<ChunkedState, io::Error>> {
let radix = 16;
match byte!(rdr) {
b @ b'0'..=b'9' => {
*size *= radix;
*size += u64::from(b - b'0');
}
b @ b'a'..=b'f' => {
*size *= radix;
*size += u64::from(b + 10 - b'a');
}
b @ b'A'..=b'F' => {
*size *= radix;
*size += u64::from(b + 10 - b'A');
}
b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => return Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Invalid Size",
)));
}
}
Poll::Ready(Ok(ChunkedState::Size))
}
fn read_size_lws(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
trace!("read_size_lws");
match byte!(rdr) {
// LWS can follow the chunk size, but no more digits can come
b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size linear white space",
))),
}
}
fn read_extension(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions
}
}
fn read_size_lf(
rdr: &mut BytesMut,
size: &mut u64,
) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' if *size > 0 => Poll::Ready(Ok(ChunkedState::Body)),
b'\n' if *size == 0 => Poll::Ready(Ok(ChunkedState::EndCr)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size LF",
))),
}
}
fn read_body(
rdr: &mut BytesMut,
rem: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
trace!("Chunked read, remaining={:?}", rem);
let len = rdr.len() as u64;
if len == 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
let slice;
if *rem > len {
slice = rdr.split().freeze();
*rem -= len;
} else {
slice = rdr.split_to(*rem as usize).freeze();
*rem = 0;
}
*buf = Some(slice);
if *rem > 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
Poll::Ready(Ok(ChunkedState::BodyCr))
}
}
}
fn read_body_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body CR",
))),
}
}
fn read_body_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::Size)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body LF",
))),
}
}
fn read_end_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end CR",
))),
}
}
fn read_end_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::End)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end LF",
))),
}
}
}
#[cfg(test)]
mod tests {
use bytes::{Bytes, BytesMut};
use http::{Method, Version};
use super::*;
use crate::error::ParseError;
use crate::http::header::{HeaderName, SET_COOKIE};
use crate::httpmessage::HttpMessage;
use crate::{
error::ParseError,
header::{HeaderName, SET_COOKIE},
HttpMessage as _,
};
impl PayloadType {
fn unwrap(self) -> PayloadDecoder {
pub(crate) fn unwrap(self) -> PayloadDecoder {
match self {
PayloadType::Payload(pl) => pl,
_ => panic!(),
}
}
fn is_unhandled(&self) -> bool {
pub(crate) fn is_unhandled(&self) -> bool {
matches!(self, PayloadType::Stream(_))
}
}
impl PayloadItem {
fn chunk(self) -> Bytes {
pub(crate) fn chunk(self) -> Bytes {
match self {
PayloadItem::Chunk(chunk) => chunk,
_ => panic!("error"),
}
}
fn eof(&self) -> bool {
pub(crate) fn eof(&self) -> bool {
matches!(*self, PayloadItem::Eof)
}
}
@ -685,7 +557,7 @@ mod tests {
match MessageDecoder::<Request>::default().decode($e) {
Err(err) => match err {
ParseError::Io(_) => unreachable!("Parse error expected"),
_ => (),
_ => {}
},
_ => unreachable!("Error expected"),
}
@ -734,8 +606,7 @@ mod tests {
#[test]
fn test_parse_body() {
let mut buf =
BytesMut::from("GET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
let mut buf = BytesMut::from("GET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
@ -751,8 +622,7 @@ mod tests {
#[test]
fn test_parse_body_crlf() {
let mut buf =
BytesMut::from("\r\nGET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
let mut buf = BytesMut::from("\r\nGET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
@ -821,8 +691,8 @@ mod tests {
.get_all(SET_COOKIE)
.map(|v| v.to_str().unwrap().to_owned())
.collect();
assert_eq!(val[1], "c1=cookie1");
assert_eq!(val[0], "c2=cookie2");
assert_eq!(val[0], "c1=cookie1");
assert_eq!(val[1], "c2=cookie2");
}
#[test]
@ -958,34 +828,6 @@ mod tests {
assert!(req.upgrade());
}
#[test]
fn test_request_chunked() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let req = parse_ready!(&mut buf);
if let Ok(val) = req.chunked() {
assert!(val);
} else {
unreachable!("Error");
}
// intentional typo in "chunked"
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chnked\r\n\r\n",
);
let req = parse_ready!(&mut buf);
if let Ok(val) = req.chunked() {
assert!(!val);
} else {
unreachable!("Error");
}
}
#[test]
fn test_headers_content_length_err_1() {
let mut buf = BytesMut::from(
@ -1103,128 +945,9 @@ mod tests {
expect_parse_err!(&mut buf);
}
#[test]
fn test_http_request_chunked_payload() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n");
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"data"
);
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"line"
);
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn test_http_request_chunked_payload_and_next_message() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(
b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n\
POST /test2 HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n"
.iter(),
);
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"line");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert!(req.chunked().unwrap());
assert_eq!(*req.method(), Method::POST);
assert!(req.chunked().unwrap());
}
#[test]
fn test_http_request_chunked_payload_chunks() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\n1111\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"1111");
buf.extend(b"4\r\ndata\r");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
buf.extend(b"\n4");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\n");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"li");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"li");
//trailers
//buf.feed_data("test: test\r\n");
//not_ready!(reader.parse(&mut buf, &mut readbuf));
buf.extend(b"ne\r\n0\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"ne");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r\n");
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn test_parse_chunked_payload_chunk_extension() {
let mut buf = BytesMut::from(
&"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n"[..],
);
let mut reader = MessageDecoder::<Request>::default();
let (msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(msg.chunked().unwrap());
buf.extend(b"4;test\r\ndata\r\n4\r\nline\r\n0\r\n\r\n"); // test: test\r\n\r\n")
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"data"));
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"line"));
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
}
#[test]
fn test_response_http10_read_until_eof() {
let mut buf = BytesMut::from(&"HTTP/1.0 200 Ok\r\n\r\ntest data"[..]);
let mut buf = BytesMut::from("HTTP/1.0 200 Ok\r\n\r\ntest data");
let mut reader = MessageDecoder::<ResponseHead>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
@ -1233,4 +956,84 @@ mod tests {
let chunk = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"test data")));
}
#[test]
fn hrs_multiple_content_length() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: 4\r\n\
Content-Length: 2\r\n\
\r\n\
abcd",
);
expect_parse_err!(&mut buf);
}
#[test]
fn hrs_content_length_plus() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: +3\r\n\
\r\n\
000",
);
expect_parse_err!(&mut buf);
}
#[test]
fn hrs_unknown_transfer_encoding() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Transfer-Encoding: JUNK\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
5\r\n\
hello\r\n\
0",
);
expect_parse_err!(&mut buf);
}
#[test]
fn hrs_multiple_transfer_encoding() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: 51\r\n\
Transfer-Encoding: identity\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
0\r\n\
\r\n\
GET /forbidden HTTP/1.1\r\n\
Host: example.com\r\n\r\n",
);
expect_parse_err!(&mut buf);
}
#[test]
fn transfer_encoding_agrees() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: 3\r\n\
Transfer-Encoding: identity\r\n\
\r\n\
0\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let chunk = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"0\r\n")));
}
}

File diff suppressed because it is too large


@ -1,27 +1,29 @@
use std::io::Write;
use std::marker::PhantomData;
use std::ptr::copy_nonoverlapping;
use std::slice::from_raw_parts_mut;
use std::{cmp, io};
use std::{
cmp,
io::{self, Write as _},
marker::PhantomData,
ptr::copy_nonoverlapping,
slice::from_raw_parts_mut,
};
use bytes::{BufMut, BytesMut};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::header::map;
use crate::helpers;
use crate::http::header::{CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
use crate::http::{HeaderMap, StatusCode, Version};
use crate::message::{ConnectionType, RequestHeadType};
use crate::response::Response;
use crate::{
body::BodySize,
header::{
map::Value, HeaderMap, HeaderName, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
},
helpers, ConnectionType, RequestHeadType, Response, ServiceConfig, StatusCode, Version,
};
const AVERAGE_HEADER_SIZE: usize = 30;
#[derive(Debug)]
pub(crate) struct MessageEncoder<T: MessageType> {
#[allow(dead_code)]
pub length: BodySize,
pub te: TransferEncoding,
_t: PhantomData<T>,
_phantom: PhantomData<T>,
}
impl<T: MessageType> Default for MessageEncoder<T> {
@ -29,7 +31,7 @@ impl<T: MessageType> Default for MessageEncoder<T> {
MessageEncoder {
length: BodySize::None,
te: TransferEncoding::empty(),
_t: PhantomData,
_phantom: PhantomData,
}
}
}
@ -54,7 +56,7 @@ pub(crate) trait MessageType: Sized {
dst: &mut BytesMut,
version: Version,
mut length: BodySize,
ctype: ConnectionType,
conn_type: ConnectionType,
config: &ServiceConfig,
) -> io::Result<()> {
let chunked = self.chunked();
@ -64,19 +66,33 @@ pub(crate) trait MessageType: Sized {
// Content length
if let Some(status) = self.status() {
match status {
StatusCode::NO_CONTENT
| StatusCode::CONTINUE
| StatusCode::PROCESSING => length = BodySize::None,
StatusCode::SWITCHING_PROTOCOLS => {
StatusCode::CONTINUE
| StatusCode::SWITCHING_PROTOCOLS
| StatusCode::PROCESSING
| StatusCode::NO_CONTENT => {
// skip content-length and transfer-encoding headers
// see https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.1
// and https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.2
skip_len = true;
length = BodySize::Stream;
length = BodySize::None
}
_ => (),
StatusCode::NOT_MODIFIED => {
// 304 responses should never have a body but should retain a manually set
// content-length header
// see https://datatracker.ietf.org/doc/html/rfc7232#section-4.1
skip_len = false;
length = BodySize::None;
}
_ => {}
}
}
match length {
BodySize::Stream => {
if chunked {
skip_len = true;
if camel_case {
dst.put_slice(b"\r\nTransfer-Encoding: chunked\r\n")
} else {
@ -87,19 +103,14 @@ pub(crate) trait MessageType: Sized {
dst.put_slice(b"\r\n");
}
}
BodySize::Empty => {
if camel_case {
dst.put_slice(b"\r\nContent-Length: 0\r\n");
} else {
dst.put_slice(b"\r\ncontent-length: 0\r\n");
}
}
BodySize::Sized(0) if camel_case => dst.put_slice(b"\r\nContent-Length: 0\r\n"),
BodySize::Sized(0) => dst.put_slice(b"\r\ncontent-length: 0\r\n"),
BodySize::Sized(len) => helpers::write_content_length(len, dst),
BodySize::None => dst.put_slice(b"\r\n"),
}
// Connection
match ctype {
match conn_type {
ConnectionType::Upgrade => dst.put_slice(b"connection: upgrade\r\n"),
ConnectionType::KeepAlive if version < Version::HTTP_11 => {
if camel_case {
@ -115,24 +126,14 @@ pub(crate) trait MessageType: Sized {
dst.put_slice(b"connection: close\r\n")
}
}
_ => (),
_ => {}
}
// merging headers from head and extra headers. HeaderMap::new() does not allocate.
let empty_headers = HeaderMap::new();
let extra_headers = self.extra_headers().unwrap_or(&empty_headers);
let headers = self
.headers()
.inner
.iter()
.filter(|(name, _)| !extra_headers.contains_key(*name))
.chain(extra_headers.inner.iter());
// write headers
let mut has_date = false;
let mut buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
let mut buf = dst.chunk_mut().as_mut_ptr();
let mut remaining = dst.capacity() - dst.len();
// tracks bytes written since the last buffer resize; writes via the raw pointer happen beyond the
// container's knowledge, so this count is used to sync the container's cursor after data is written
// container's knowledge, this is used to sync the containers cursor after data is written
let mut pos = 0;
for (key, value) in headers {
self.write_headers(|key, value| {
match *key {
CONNECTION => continue,
TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => continue,
CONNECTION => return,
TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => return,
DATE => has_date = true,
_ => (),
_ => {}
}
let k = key.as_str().as_bytes();
let k_len = k.len();
match value {
map::Value::One(ref val) => {
let v = val.as_ref();
let v_len = v.len();
// TODO: drain?
for val in value.iter() {
let v = val.as_ref();
let v_len = v.len();
// key length + value length + colon + space + \r\n
let len = k_len + v_len + 4;
// key length + value length + colon + space + \r\n
let len = k_len + v_len + 4;
if len > remaining {
// not enough room in buffer for this header; reserve more space
// SAFETY: all the bytes written up to position "pos" are initialized
// the written byte count and pointer advancement are kept in sync
unsafe {
dst.advance_mut(pos);
}
pos = 0;
dst.reserve(len * 2);
remaining = dst.capacity() - dst.len();
// re-assign buf raw pointer since it's possible that the buffer was
// reallocated and/or resized
buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
}
// SAFETY: on each write, it is enough to ensure that the advancement of the
// cursor matches the number of bytes written
if len > remaining {
// SAFETY: all the bytes written up to position "pos" are initialized
// the written byte count and pointer advancement are kept in sync
unsafe {
// use upper Camel-Case
if camel_case {
write_camel_case(k, from_raw_parts_mut(buf, k_len))
} else {
write_data(k, buf, k_len)
}
buf = buf.add(k_len);
write_data(b": ", buf, 2);
buf = buf.add(2);
write_data(v, buf, v_len);
buf = buf.add(v_len);
write_data(b"\r\n", buf, 2);
buf = buf.add(2);
dst.advance_mut(pos);
}
pos += len;
remaining -= len;
pos = 0;
dst.reserve(len * 2);
remaining = dst.capacity() - dst.len();
// re-assign buf raw pointer since it's possible that the buffer was
// reallocated and/or resized
buf = dst.chunk_mut().as_mut_ptr();
}
map::Value::Multi(ref vec) => {
for val in vec {
let v = val.as_ref();
let v_len = v.len();
let len = k_len + v_len + 4;
if len > remaining {
// SAFETY: all the bytes written up to position "pos" are initialized
// the written byte count and pointer advancement are kept in sync
unsafe {
dst.advance_mut(pos);
}
pos = 0;
dst.reserve(len * 2);
remaining = dst.capacity() - dst.len();
// re-assign buf raw pointer since it's possible that the buffer was
// reallocated and/or resized
buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
}
// SAFETY: on each write, it is enough to ensure that the advancement of
// the cursor matches the number of bytes written
unsafe {
if camel_case {
write_camel_case(k, from_raw_parts_mut(buf, k_len));
} else {
write_data(k, buf, k_len);
}
buf = buf.add(k_len);
write_data(b": ", buf, 2);
buf = buf.add(2);
write_data(v, buf, v_len);
buf = buf.add(v_len);
write_data(b"\r\n", buf, 2);
buf = buf.add(2);
};
pos += len;
remaining -= len;
// SAFETY: on each write, it is enough to ensure that the advancement of
// the cursor matches the number of bytes written
unsafe {
if camel_case {
// use Camel-Case headers
write_camel_case(k, buf, k_len);
} else {
write_data(k, buf, k_len);
}
}
buf = buf.add(k_len);
write_data(b": ", buf, 2);
buf = buf.add(2);
write_data(v, buf, v_len);
buf = buf.add(v_len);
write_data(b"\r\n", buf, 2);
buf = buf.add(2);
};
pos += len;
remaining -= len;
}
}
});
// final cursor synchronization with the bytes container
//
@ -270,6 +221,24 @@ pub(crate) trait MessageType: Sized {
Ok(())
}
fn write_headers<F>(&mut self, mut f: F)
where
F: FnMut(&HeaderName, &Value),
{
match self.extra_headers() {
Some(headers) => {
// merging headers from head and extra headers.
self.headers()
.inner
.iter()
.filter(|(name, _)| !headers.contains_key(*name))
.chain(headers.inner.iter())
.for_each(|(k, v)| f(k, v))
}
None => self.headers().inner.iter().for_each(|(k, v)| f(k, v)),
}
}
}
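For reference, the `write_headers` merge above can be sketched with plain std collections: entries in the extra map shadow same-named entries in the head map, and each remaining pair is visited exactly once. This is an editor's illustrative sketch only; `merged_headers` and the `HashMap` types are stand-ins, not actix-http's `HeaderMap` API.

use std::collections::HashMap;

fn merged_headers<'a>(
    head: &'a HashMap<String, String>,
    extra: &'a HashMap<String, String>,
) -> impl Iterator<Item = (&'a String, &'a String)> {
    // keep head entries only when extra does not override them, then append extra
    head.iter()
        .filter(|(name, _)| !extra.contains_key(*name))
        .chain(extra.iter())
}

fn main() {
    let mut head = HashMap::new();
    head.insert("content-type".to_owned(), "text/plain".to_owned());
    head.insert("date".to_owned(), "old".to_owned());

    let mut extra = HashMap::new();
    extra.insert("date".to_owned(), "new".to_owned());

    // prints content-type from `head` and the overriding date from `extra`
    for (k, v) in merged_headers(&head, &extra) {
        println!("{}: {}", k, v);
    }
}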
impl MessageType for Response<()> {
@ -289,6 +258,12 @@ impl MessageType for Response<()> {
None
}
fn camel_case(&self) -> bool {
self.head()
.flags
.contains(crate::message::Flags::CAMEL_CASE)
}
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> {
let head = self.head();
let reason = head.reason().as_bytes();
@ -326,7 +301,7 @@ impl MessageType for RequestHeadType {
let head = self.as_ref();
dst.reserve(256 + head.headers.len() * AVERAGE_HEADER_SIZE);
write!(
Writer(dst),
helpers::MutWriter(dst),
"{} {} {}",
head.method,
head.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"),
@ -336,11 +311,7 @@ impl MessageType for RequestHeadType {
Version::HTTP_11 => "HTTP/1.1",
Version::HTTP_2 => "HTTP/2.0",
Version::HTTP_3 => "HTTP/3.0",
_ =>
return Err(io::Error::new(
io::ErrorKind::Other,
"unsupported version"
)),
_ => return Err(io::Error::new(io::ErrorKind::Other, "unsupported version")),
}
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
@ -366,13 +337,13 @@ impl<T: MessageType> MessageEncoder<T> {
stream: bool,
version: Version,
length: BodySize,
ctype: ConnectionType,
conn_type: ConnectionType,
config: &ServiceConfig,
) -> io::Result<()> {
// transfer encoding
if !head {
self.te = match length {
BodySize::Empty => TransferEncoding::empty(),
BodySize::Sized(0) => TransferEncoding::empty(),
BodySize::Sized(len) => TransferEncoding::length(len),
BodySize::Stream => {
if message.chunked() && !stream {
@ -388,7 +359,7 @@ impl<T: MessageType> MessageEncoder<T> {
}
message.encode_status(dst)?;
message.encode_headers(dst, version, length, ctype, config)
message.encode_headers(dst, version, length, conn_type, config)
}
}
@ -402,10 +373,12 @@ pub(crate) struct TransferEncoding {
enum TransferEncodingKind {
/// An Encoder for when Transfer-Encoding includes `chunked`.
Chunked(bool),
/// An Encoder for when Content-Length is set.
///
/// Enforces that the body is not longer than the Content-Length header.
Length(u64),
/// An Encoder for when Content-Length is not known.
///
/// Application decides when to stop writing.
@ -459,7 +432,7 @@ impl TransferEncoding {
*eof = true;
buf.extend_from_slice(b"0\r\n\r\n");
} else {
writeln!(Writer(buf), "{:X}\r", msg.len())
writeln!(helpers::MutWriter(buf), "{:X}\r", msg.len())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
buf.reserve(msg.len() + 2);
@ -509,50 +482,44 @@ impl TransferEncoding {
}
}
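The chunked branch above frames each chunk as a hex size line, the chunk bytes, and CRLF, terminating the stream with a zero-length chunk. A minimal sketch of that framing, assuming a plain `Vec<u8>` in place of `BytesMut` (`encode_chunk` is an illustrative name):

fn encode_chunk(buf: &mut Vec<u8>, msg: &[u8]) {
    if msg.is_empty() {
        // end-of-stream marker
        buf.extend_from_slice(b"0\r\n\r\n");
    } else {
        // hex length line, then the chunk itself, then CRLF
        buf.extend_from_slice(format!("{:X}\r\n", msg.len()).as_bytes());
        buf.extend_from_slice(msg);
        buf.extend_from_slice(b"\r\n");
    }
}

fn main() {
    let mut buf = Vec::new();
    encode_chunk(&mut buf, b"hello");
    encode_chunk(&mut buf, b"");
    // prints "5\r\nhello\r\n0\r\n\r\n" (with literal CR/LF bytes)
    println!("{:?}", String::from_utf8(buf).unwrap());
}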
struct Writer<'a>(pub &'a mut BytesMut);
impl<'a> io::Write for Writer<'a> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
/// # Safety
/// Callers must ensure that the given length matches given value length.
/// Callers must ensure that the given `len` matches the given `value` length and that `buf` is
/// valid for writes of at least `len` bytes.
unsafe fn write_data(value: &[u8], buf: *mut u8, len: usize) {
debug_assert_eq!(value.len(), len);
copy_nonoverlapping(value.as_ptr(), buf, len);
}
fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
let mut index = 0;
let key = value;
let mut key_iter = key.iter();
/// # Safety
/// Callers must ensure that the given `len` matches the given `value` length and that `buf` is
/// valid for writes of at least `len` bytes.
unsafe fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) {
// first copy entire (potentially wrong) slice to output
write_data(value, buf, len);
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
} else {
return;
// SAFETY: We just initialized the buffer with `value`
let buffer = from_raw_parts_mut(buf, len);
let mut iter = value.iter();
// first character should be uppercase
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[0] = c & 0b1101_1111;
}
while let Some(c) = key_iter.next() {
buffer[index] = *c;
index += 1;
if *c == b'-' {
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
// track 1 ahead of the current position since that's the location being assigned to
let mut index = 2;
// remaining characters after hyphens should also be uppercase
while let Some(&c) = iter.next() {
if c == b'-' {
// advance iter by one and uppercase if needed
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[index] = c & 0b1101_1111;
}
}
index += 1;
}
}
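The unsafe `write_camel_case` above leaves buffer guarantees to the caller; the same Camel-Case rule can be sketched safely over an owned buffer (uppercase the first byte and any byte that follows a hyphen via the `& 0b1101_1111` ASCII trick). `camel_case` below is an illustrative helper, not part of this crate:

fn camel_case(name: &[u8]) -> Vec<u8> {
    let mut out = name.to_vec();
    let mut upper_next = true;

    for byte in out.iter_mut() {
        if upper_next {
            if byte.is_ascii_lowercase() {
                *byte &= 0b1101_1111; // clear bit 5 => ASCII uppercase
            }
            upper_next = false;
        }
        if *byte == b'-' {
            // the byte after a hyphen should be uppercased
            upper_next = true;
        }
    }

    out
}

fn main() {
    assert_eq!(camel_case(b"content-length"), b"Content-Length".to_vec());
    assert_eq!(camel_case(b"x-request-id"), b"X-Request-Id".to_vec());
    println!("ok");
}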
@ -564,8 +531,10 @@ mod tests {
use http::header::AUTHORIZATION;
use super::*;
use crate::http::header::{HeaderValue, CONTENT_TYPE};
use crate::RequestHead;
use crate::{
header::{HeaderValue, CONTENT_TYPE},
RequestHead,
};
#[test]
fn test_chunked_te() {
@ -581,8 +550,8 @@ mod tests {
);
}
#[test]
fn test_camel_case() {
#[actix_rt::test]
async fn test_camel_case() {
let mut bytes = BytesMut::with_capacity(2048);
let mut head = RequestHead::default();
head.set_camel_case_headers(true);
@ -595,12 +564,12 @@ mod tests {
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Empty,
BodySize::Sized(0),
ConnectionType::Close,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("Content-Length: 0\r\n"));
assert!(data.contains("Connection: close\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n"));
@ -613,8 +582,7 @@ mod tests {
ConnectionType::KeepAlive,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("Transfer-Encoding: chunked\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n"));
assert!(data.contains("Date: date\r\n"));
@ -635,16 +603,15 @@ mod tests {
ConnectionType::KeepAlive,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("transfer-encoding: chunked\r\n"));
assert!(data.contains("content-type: xml\r\n"));
assert!(data.contains("content-type: plain/text\r\n"));
assert!(data.contains("date: date\r\n"));
}
#[test]
fn test_extra_headers() {
#[actix_rt::test]
async fn test_extra_headers() {
let mut bytes = BytesMut::with_capacity(2048);
let mut head = RequestHead::default();
@ -665,15 +632,35 @@ mod tests {
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Empty,
BodySize::Sized(0),
ConnectionType::Close,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("content-length: 0\r\n"));
assert!(data.contains("connection: close\r\n"));
assert!(data.contains("authorization: another authorization\r\n"));
assert!(data.contains("date: date\r\n"));
}
#[actix_rt::test]
async fn test_no_content_length() {
let mut bytes = BytesMut::with_capacity(2048);
let mut res = Response::with_body(StatusCode::SWITCHING_PROTOCOLS, ());
res.headers_mut().insert(DATE, HeaderValue::from_static(""));
res.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0"));
let _ = res.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Stream,
ConnectionType::Upgrade,
&ServiceConfig::default(),
);
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(!data.contains("content-length: 0\r\n"));
assert!(!data.contains("transfer-encoding: chunked\r\n"));
}
}


@ -1,38 +1,33 @@
use std::task::{Context, Poll};
use actix_service::{Service, ServiceFactory};
use futures_util::future::{ok, Ready};
use actix_utils::future::{ready, Ready};
use crate::error::Error;
use crate::request::Request;
use crate::{Error, Request};
pub struct ExpectHandler;
impl ServiceFactory for ExpectHandler {
type Config = ();
type Request = Request;
impl ServiceFactory<Request> for ExpectHandler {
type Response = Request;
type Error = Error;
type Config = ();
type Service = ExpectHandler;
type InitError = Error;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(ExpectHandler)
fn new_service(&self, _: Self::Config) -> Self::Future {
ready(Ok(ExpectHandler))
}
}
impl Service for ExpectHandler {
type Request = Request;
impl Service<Request> for ExpectHandler {
type Response = Request;
type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
actix_service::always_ready!();
fn call(&mut self, req: Request) -> Self::Future {
ok(req)
fn call(&self, req: Request) -> Self::Future {
ready(Ok(req))
// TODO: add some way to trigger error
// Err(error::ErrorExpectationFailed("test"))
}
}
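The rewritten handler shows the actix-service 2.x shape: the request type is a trait parameter, `call` takes `&self`, and `always_ready!()` replaces a hand-written `poll_ready`. A hedged, minimal sketch of the same pass-through pattern, assuming `actix-service`, `actix-utils`, and `futures` as dependencies (`Passthrough` is an illustrative name):

use actix_service::Service;
use actix_utils::future::{ready, Ready};

struct Passthrough;

impl Service<String> for Passthrough {
    type Response = String;
    type Error = std::convert::Infallible;
    type Future = Ready<Result<Self::Response, Self::Error>>;

    // expands to a `poll_ready` that always returns `Poll::Ready(Ok(()))`
    actix_service::always_ready!();

    fn call(&self, req: String) -> Self::Future {
        // hand the request straight back, resolved immediately
        ready(Ok(req))
    }
}

fn main() {
    let svc = Passthrough;
    let fut = svc.call("hello".to_owned());
    assert_eq!(futures::executor::block_on(fut).unwrap(), "hello");
}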


@ -1,6 +1,8 @@
//! HTTP/1 implementation
//! HTTP/1 protocol implementation.
use bytes::{Bytes, BytesMut};
mod chunked;
mod client;
mod codec;
mod decoder;
@ -17,7 +19,7 @@ pub use self::codec::Codec;
pub use self::dispatcher::Dispatcher;
pub use self::expect::ExpectHandler;
pub use self::payload::Payload;
pub use self::service::{H1Service, H1ServiceHandler, OneRequest};
pub use self::service::{H1Service, H1ServiceHandler};
pub use self::upgrade::UpgradeHandler;
pub use self::utils::SendResponse;
@ -57,7 +59,7 @@ pub(crate) fn reserve_readbuf(src: &mut BytesMut) {
#[cfg(test)]
mod tests {
use super::*;
use crate::request::Request;
use crate::Request;
impl Message<Request> {
pub fn message(self) -> Request {


@ -1,11 +1,13 @@
//! Payload stream
use std::cell::RefCell;
use std::collections::VecDeque;
use std::pin::Pin;
use std::rc::{Rc, Weak};
use std::task::{Context, Poll};
use actix_utils::task::LocalWaker;
use std::{
cell::RefCell,
collections::VecDeque,
pin::Pin,
rc::{Rc, Weak},
task::{Context, Poll, Waker},
};
use bytes::Bytes;
use futures_core::Stream;
@ -23,39 +25,32 @@ pub enum PayloadStatus {
/// Buffered stream of bytes chunks
///
/// Payload stores chunks in a vector. First chunk can be received with
/// `.readany()` method. Payload stream is not thread safe. Payload does not
/// notify current task when new data is available.
/// Payload stores chunks in a vector. First chunk can be received with `poll_next`. Payload does
/// not notify current task when new data is available.
///
/// Payload stream can be used as `Response` body stream.
/// Payload can be used as `Response` body stream.
#[derive(Debug)]
pub struct Payload {
inner: Rc<RefCell<Inner>>,
}
impl Payload {
/// Create payload stream.
/// Creates a payload stream.
///
/// This method constructs two objects responsible for bytes stream
/// generation.
///
/// * `PayloadSender` - *Sender* side of the stream
///
/// * `Payload` - *Receiver* side of the stream
/// This method constructs two objects responsible for bytes stream generation:
/// - `PayloadSender` - *Sender* side of the stream
/// - `Payload` - *Receiver* side of the stream
pub fn create(eof: bool) -> (PayloadSender, Payload) {
let shared = Rc::new(RefCell::new(Inner::new(eof)));
(
PayloadSender {
inner: Rc::downgrade(&shared),
},
PayloadSender::new(Rc::downgrade(&shared)),
Payload { inner: shared },
)
}
/// Create empty payload
#[doc(hidden)]
pub fn empty() -> Payload {
/// Creates an empty payload.
pub(crate) fn empty() -> Payload {
Payload {
inner: Rc::new(RefCell::new(Inner::new(true))),
}
@ -78,14 +73,6 @@ impl Payload {
pub fn unread_data(&mut self, data: Bytes) {
self.inner.borrow_mut().unread_data(data);
}
#[inline]
pub fn readany(
&mut self,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
self.inner.borrow_mut().readany(cx)
}
}
impl Stream for Payload {
@ -95,7 +82,7 @@ impl Stream for Payload {
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
self.inner.borrow_mut().readany(cx)
Pin::new(&mut *self.inner.borrow_mut()).poll_next(cx)
}
}
@ -105,6 +92,10 @@ pub struct PayloadSender {
}
impl PayloadSender {
fn new(inner: Weak<RefCell<Inner>>) -> Self {
Self { inner }
}
#[inline]
pub fn set_error(&mut self, err: PayloadError) {
if let Some(shared) = self.inner.upgrade() {
@ -134,7 +125,7 @@ impl PayloadSender {
if shared.borrow().need_read {
PayloadStatus::Read
} else {
shared.borrow_mut().io_task.register(cx.waker());
shared.borrow_mut().register_io(cx);
PayloadStatus::Pause
}
} else {
@ -150,8 +141,8 @@ struct Inner {
err: Option<PayloadError>,
need_read: bool,
items: VecDeque<Bytes>,
task: LocalWaker,
io_task: LocalWaker,
task: Option<Waker>,
io_task: Option<Waker>,
}
impl Inner {
@ -162,8 +153,46 @@ impl Inner {
err: None,
items: VecDeque::new(),
need_read: true,
task: LocalWaker::new(),
io_task: LocalWaker::new(),
task: None,
io_task: None,
}
}
/// Wake up future waiting for payload data to be available.
fn wake(&mut self) {
if let Some(waker) = self.task.take() {
waker.wake();
}
}
/// Wake up future feeding data to Payload.
fn wake_io(&mut self) {
if let Some(waker) = self.io_task.take() {
waker.wake();
}
}
/// Registers a future waiting for data from the payload.
/// The waker will be used in `Inner::wake`.
fn register(&mut self, cx: &mut Context<'_>) {
if self
.task
.as_ref()
.map_or(true, |w| !cx.waker().will_wake(w))
{
self.task = Some(cx.waker().clone());
}
}
/// Registers a future feeding data to the payload.
/// The waker will be used in `Inner::wake_io`.
fn register_io(&mut self, cx: &mut Context<'_>) {
if self
.io_task
.as_ref()
.map_or(true, |w| !cx.waker().will_wake(w))
{
self.io_task = Some(cx.waker().clone());
}
}
@ -182,9 +211,7 @@ impl Inner {
self.len += data.len();
self.items.push_back(data);
self.need_read = self.len < MAX_BUFFER_SIZE;
if let Some(task) = self.task.take() {
task.wake()
}
self.wake();
}
#[cfg(test)]
@ -192,8 +219,8 @@ impl Inner {
self.len
}
fn readany(
&mut self,
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
if let Some(data) = self.items.pop_front() {
@ -201,9 +228,9 @@ impl Inner {
self.need_read = self.len < MAX_BUFFER_SIZE;
if self.need_read && !self.eof {
self.task.register(cx.waker());
self.register(cx);
}
self.io_task.wake();
self.wake_io();
Poll::Ready(Some(Ok(data)))
} else if let Some(err) = self.err.take() {
Poll::Ready(Some(Err(err)))
@ -211,8 +238,8 @@ impl Inner {
Poll::Ready(None)
} else {
self.need_read = true;
self.task.register(cx.waker());
self.io_task.wake();
self.register(cx);
self.wake_io();
Poll::Pending
}
}
@ -225,8 +252,18 @@ impl Inner {
#[cfg(test)]
mod tests {
use std::panic::{RefUnwindSafe, UnwindSafe};
use actix_utils::future::poll_fn;
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*;
use futures_util::future::poll_fn;
assert_impl_all!(Payload: Unpin);
assert_not_impl_any!(Payload: Send, Sync, UnwindSafe, RefUnwindSafe);
assert_impl_all!(Inner: Unpin, Send, Sync);
assert_not_impl_any!(Inner: UnwindSafe, RefUnwindSafe);
#[actix_rt::test]
async fn test_unread_data() {
@ -238,7 +275,10 @@ mod tests {
assert_eq!(
Bytes::from("data"),
poll_fn(|cx| payload.readany(cx)).await.unwrap().unwrap()
poll_fn(|cx| Pin::new(&mut payload).poll_next(cx))
.await
.unwrap()
.unwrap()
);
}
}
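The new `Inner` replaces `LocalWaker` with `Option<Waker>` plus `register`/`wake` helpers: a fresh waker is stored only when the existing one would not wake the current task, and it is consumed on wake. A self-contained sketch of that bookkeeping (std plus the `futures` crate for a no-op waker in the demo; `WakerSlot` is an illustrative name, not actix-http API):

use std::task::{Context, Waker};

#[derive(Default)]
struct WakerSlot {
    waker: Option<Waker>,
}

impl WakerSlot {
    /// Remember the current task so it can be woken later.
    fn register(&mut self, cx: &mut Context<'_>) {
        if self
            .waker
            .as_ref()
            .map_or(true, |w| !cx.waker().will_wake(w))
        {
            self.waker = Some(cx.waker().clone());
        }
    }

    /// Wake the registered task, if any, consuming the stored waker.
    fn wake(&mut self) {
        if let Some(waker) = self.waker.take() {
            waker.wake();
        }
    }
}

fn main() {
    let waker = futures::task::noop_waker();
    let mut cx = Context::from_waker(&waker);

    let mut slot = WakerSlot::default();
    slot.register(&mut cx);
    slot.wake(); // wakes once and clears the slot
    assert!(slot.waker.is_none());
}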


@ -1,48 +1,49 @@
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use std::{fmt, net};
use std::{
fmt,
marker::PhantomData,
net,
rc::Rc,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_rt::net::TcpStream;
use actix_service::{pipeline_factory, IntoServiceFactory, Service, ServiceFactory};
use futures_core::ready;
use futures_util::future::{ok, Ready};
use actix_service::{
fn_service, IntoServiceFactory, Service, ServiceFactory, ServiceFactoryExt as _,
};
use actix_utils::future::ready;
use futures_core::future::LocalBoxFuture;
use crate::body::MessageBody;
use crate::cloneable::CloneableService;
use crate::config::ServiceConfig;
use crate::error::{DispatchError, Error, ParseError};
use crate::helpers::DataFactory;
use crate::request::Request;
use crate::response::Response;
use crate::{
body::{BoxBody, MessageBody},
config::ServiceConfig,
error::DispatchError,
service::HttpServiceHandler,
ConnectCallback, OnConnectData, Request, Response,
};
use super::codec::Codec;
use super::dispatcher::Dispatcher;
use super::{ExpectHandler, Message, UpgradeHandler};
use super::{codec::Codec, dispatcher::Dispatcher, ExpectHandler, UpgradeHandler};
/// `ServiceFactory` implementation for HTTP1 transport
pub struct H1Service<T, S, B, X = ExpectHandler, U = UpgradeHandler<T>> {
pub struct H1Service<T, S, B, X = ExpectHandler, U = UpgradeHandler> {
srv: S,
cfg: ServiceConfig,
expect: X,
upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
_t: PhantomData<(T, B)>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
_phantom: PhantomData<B>,
}
impl<T, S, B> H1Service<T, S, B>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
{
/// Create new `HttpService` instance with config.
pub(crate) fn with_config<F: IntoServiceFactory<S>>(
pub(crate) fn with_config<F: IntoServiceFactory<S, Request>>(
cfg: ServiceConfig,
service: F,
) -> Self {
@ -51,43 +52,45 @@ where
srv: service.into_factory(),
expect: ExpectHandler,
upgrade: None,
on_connect: None,
_t: PhantomData,
on_connect_ext: None,
_phantom: PhantomData,
}
}
}
impl<S, B, X, U> H1Service<TcpStream, S, B, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<
Config = (),
Request = (Request, Framed<TcpStream, Codec>),
Response = (),
>,
U::Error: fmt::Display + Into<Error>,
U: ServiceFactory<(Request, Framed<TcpStream, Codec>), Config = (), Response = ()>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Creates a simple TCP stream service.
pub fn tcp(
self,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Request = TcpStream,
Response = (),
Error = DispatchError,
InitError = (),
> {
pipeline_factory(|io: TcpStream| {
fn_service(|io: TcpStream| {
let peer_addr = io.peer_addr().ok();
ok((io, peer_addr))
ready(Ok((io, peer_addr)))
})
.and_then(self)
}
@ -97,114 +100,137 @@ where
mod openssl {
use super::*;
use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream};
use actix_tls::{openssl::HandshakeError, TlsError};
use actix_tls::accept::{
openssl::{
reexports::{Error as SslError, SslAcceptor},
Acceptor, TlsStream,
},
TlsError,
};
impl<S, B, X, U> H1Service<SslStream<TcpStream>, S, B, X, U>
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<
(Request, Framed<TlsStream<TcpStream>, Codec>),
Config = (),
Request = (Request, Framed<SslStream<TcpStream>, Codec>),
Response = (),
>,
U::Error: fmt::Display + Into<Error>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Create openssl based service
/// Create OpenSSL based service.
pub fn openssl(
self,
acceptor: SslAcceptor,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Request = TcpStream,
Response = (),
Error = TlsError<HandshakeError<TcpStream>, DispatchError>,
Error = TlsError<SslError, DispatchError>,
InitError = (),
> {
pipeline_factory(
Acceptor::new(acceptor)
.map_err(TlsError::Tls)
.map_init_err(|_| panic!()),
)
.and_then(|io: SslStream<TcpStream>| {
let peer_addr = io.get_ref().peer_addr().ok();
ok((io, peer_addr))
})
.and_then(self.map_err(TlsError::Service))
Acceptor::new(acceptor)
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
.map_err(TlsError::into_service_error)
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().peer_addr().ok();
(io, peer_addr)
})
.and_then(self.map_err(TlsError::Service))
}
}
}
#[cfg(feature = "rustls")]
mod rustls {
use std::io;
use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::{
rustls::{reexports::ServerConfig, Acceptor, TlsStream},
TlsError,
};
use super::*;
use actix_tls::rustls::{Acceptor, ServerConfig, TlsStream};
use actix_tls::TlsError;
use std::{fmt, io};
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<
(Request, Framed<TlsStream<TcpStream>, Codec>),
Config = (),
Request = (Request, Framed<TlsStream<TcpStream>, Codec>),
Response = (),
>,
U::Error: fmt::Display + Into<Error>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Create rustls based service
/// Create Rustls based service.
pub fn rustls(
self,
config: ServerConfig,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Request = TcpStream,
Response = (),
Error = TlsError<io::Error, DispatchError>,
InitError = (),
> {
pipeline_factory(
Acceptor::new(config)
.map_err(TlsError::Tls)
.map_init_err(|_| panic!()),
)
.and_then(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
ok((io, peer_addr))
})
.and_then(self.map_err(TlsError::Service))
Acceptor::new(config)
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
.map_err(TlsError::into_service_error)
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
(io, peer_addr)
})
.and_then(self.map_err(TlsError::Service))
}
}
}
impl<T, S, B, X, U> H1Service<T, S, B, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Response<BoxBody>>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
{
pub fn expect<X1>(self, expect: X1) -> H1Service<T, S, B, X1, U>
where
X1: ServiceFactory<Request = Request, Response = Request>,
X1::Error: Into<Error>,
X1: ServiceFactory<Request, Response = Request>,
X1::Error: Into<Response<BoxBody>>,
X1::InitError: fmt::Debug,
{
H1Service {
@ -212,14 +238,14 @@ where
cfg: self.cfg,
srv: self.srv,
upgrade: self.upgrade,
on_connect: self.on_connect,
_t: PhantomData,
on_connect_ext: self.on_connect_ext,
_phantom: PhantomData,
}
}
pub fn upgrade<U1>(self, upgrade: Option<U1>) -> H1Service<T, S, B, X, U1>
where
U1: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
U1: ServiceFactory<(Request, Framed<T, Codec>), Response = ()>,
U1::Error: fmt::Display,
U1::InitError: fmt::Debug,
{
@ -228,354 +254,116 @@ where
cfg: self.cfg,
srv: self.srv,
expect: self.expect,
on_connect: self.on_connect,
_t: PhantomData,
on_connect_ext: self.on_connect_ext,
_phantom: PhantomData,
}
}
/// Set on connect callback.
pub(crate) fn on_connect(
mut self,
f: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
) -> Self {
self.on_connect = f;
pub(crate) fn on_connect_ext(mut self, f: Option<Rc<ConnectCallback<T>>>) -> Self {
self.on_connect_ext = f;
self
}
}
impl<T, S, B, X, U> ServiceFactory for H1Service<T, S, B, X, U>
impl<T, S, B, X, U> ServiceFactory<(T, Option<net::SocketAddr>)> for H1Service<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
T: AsyncRead + AsyncWrite + Unpin + 'static,
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<Config = (), Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display + Into<Error>,
U: ServiceFactory<(Request, Framed<T, Codec>), Config = (), Response = ()>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
type Config = ();
type Request = (T, Option<net::SocketAddr>);
type Response = ();
type Error = DispatchError;
type InitError = ();
type Config = ();
type Service = H1ServiceHandler<T, S::Service, B, X::Service, U::Service>;
type Future = H1ServiceResponse<T, S, B, X, U>;
fn new_service(&self, _: ()) -> Self::Future {
H1ServiceResponse {
fut: self.srv.new_service(()),
fut_ex: Some(self.expect.new_service(())),
fut_upg: self.upgrade.as_ref().map(|f| f.new_service(())),
expect: None,
upgrade: None,
on_connect: self.on_connect.clone(),
cfg: Some(self.cfg.clone()),
_t: PhantomData,
}
}
}
#[doc(hidden)]
#[pin_project::pin_project]
pub struct H1ServiceResponse<T, S, B, X, U>
where
S: ServiceFactory<Request = Request>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
X: ServiceFactory<Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
#[pin]
fut: S::Future,
#[pin]
fut_ex: Option<X::Future>,
#[pin]
fut_upg: Option<U::Future>,
expect: Option<X::Service>,
upgrade: Option<U::Service>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
cfg: Option<ServiceConfig>,
_t: PhantomData<(T, B)>,
}
impl<T, S, B, X, U> Future for H1ServiceResponse<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
X: ServiceFactory<Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
type Output = Result<H1ServiceHandler<T, S::Service, B, X::Service, U::Service>, ()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
if let Some(fut) = this.fut_ex.as_pin_mut() {
let expect = ready!(fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)))?;
this = self.as_mut().project();
*this.expect = Some(expect);
this.fut_ex.set(None);
}
if let Some(fut) = this.fut_upg.as_pin_mut() {
let upgrade = ready!(fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)))?;
this = self.as_mut().project();
*this.upgrade = Some(upgrade);
this.fut_ex.set(None);
}
let result = ready!(this
.fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)));
Poll::Ready(result.map(|service| {
let this = self.as_mut().project();
H1ServiceHandler::new(
this.cfg.take().unwrap(),
service,
this.expect.take().unwrap(),
this.upgrade.take(),
this.on_connect.clone(),
)
}))
}
}
/// `Service` implementation for HTTP1 transport
pub struct H1ServiceHandler<T, S: Service, B, X: Service, U: Service> {
srv: CloneableService<S>,
expect: CloneableService<X>,
upgrade: Option<CloneableService<U>>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
cfg: ServiceConfig,
_t: PhantomData<(T, B)>,
}
impl<T, S, B, X, U> H1ServiceHandler<T, S, B, X, U>
where
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
fn new(
cfg: ServiceConfig,
srv: S,
expect: X,
upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
) -> H1ServiceHandler<T, S, B, X, U> {
H1ServiceHandler {
srv: CloneableService::new(srv),
expect: CloneableService::new(expect),
upgrade: upgrade.map(CloneableService::new),
cfg,
on_connect,
_t: PhantomData,
}
}
}
impl<T, S, B, X, U> Service for H1ServiceHandler<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display + Into<Error>,
{
type Request = (T, Option<net::SocketAddr>);
type Response = ();
type Error = DispatchError;
type Future = Dispatcher<T, S, B, X, U>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let ready = self
.expect
.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready();
let ready = self
.srv
.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready()
&& ready;
let ready = if let Some(ref mut upg) = self.upgrade {
upg.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready()
&& ready
} else {
ready
};
if ready {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn call(&mut self, (io, addr): Self::Request) -> Self::Future {
let on_connect = if let Some(ref on_connect) = self.on_connect {
Some(on_connect(&io))
} else {
None
};
Dispatcher::new(
io,
self.cfg.clone(),
self.srv.clone(),
self.expect.clone(),
self.upgrade.clone(),
on_connect,
addr,
)
}
}
/// `ServiceFactory` implementation for `OneRequestService` service
#[derive(Default)]
pub struct OneRequest<T> {
config: ServiceConfig,
_t: PhantomData<T>,
}
impl<T> OneRequest<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
/// Create new `H1SimpleService` instance.
pub fn new() -> Self {
OneRequest {
config: ServiceConfig::default(),
_t: PhantomData,
}
}
}
impl<T> ServiceFactory for OneRequest<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Config = ();
type Request = T;
type Response = (Request, Framed<T, Codec>);
type Error = ParseError;
type InitError = ();
type Service = OneRequestService<T>;
type Future = Ready<Result<Self::Service, Self::InitError>>;
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(OneRequestService {
_t: PhantomData,
config: self.config.clone(),
let service = self.srv.new_service(());
let expect = self.expect.new_service(());
let upgrade = self.upgrade.as_ref().map(|s| s.new_service(()));
let on_connect_ext = self.on_connect_ext.clone();
let cfg = self.cfg.clone();
Box::pin(async move {
let expect = expect
.await
.map_err(|e| log::error!("Init http expect service error: {:?}", e))?;
let upgrade = match upgrade {
Some(upgrade) => {
let upgrade = upgrade
.await
.map_err(|e| log::error!("Init http upgrade service error: {:?}", e))?;
Some(upgrade)
}
None => None,
};
let service = service
.await
.map_err(|e| log::error!("Init http service error: {:?}", e))?;
Ok(H1ServiceHandler::new(
cfg,
service,
expect,
upgrade,
on_connect_ext,
))
})
}
}
/// `Service` implementation for HTTP1 transport. Reads one request and returns
/// request and framed object.
pub struct OneRequestService<T> {
_t: PhantomData<T>,
config: ServiceConfig,
}
/// `Service` implementation for HTTP/1 transport
pub type H1ServiceHandler<T, S, B, X, U> = HttpServiceHandler<T, S, B, X, U>;
impl<T> Service for OneRequestService<T>
impl<T, S, B, X, U> Service<(T, Option<net::SocketAddr>)> for HttpServiceHandler<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Request = T;
type Response = (Request, Framed<T, Codec>);
type Error = ParseError;
type Future = OneRequestServiceResponse<T>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
S: Service<Request>,
S::Error: Into<Response<BoxBody>>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request, Response = Request>,
X::Error: Into<Response<BoxBody>>,
U: Service<(Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display + Into<Response<BoxBody>>,
{
type Response = ();
type Error = DispatchError;
type Future = Dispatcher<T, S, B, X, U>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self._poll_ready(cx).map_err(|err| {
log::error!("HTTP/1 service readiness error: {:?}", err);
DispatchError::Service(err)
})
}
fn call(&mut self, req: Self::Request) -> Self::Future {
OneRequestServiceResponse {
framed: Some(Framed::new(req, Codec::new(self.config.clone()))),
}
}
}
#[doc(hidden)]
#[pin_project::pin_project]
pub struct OneRequestServiceResponse<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
#[pin]
framed: Option<Framed<T, Codec>>,
}
impl<T> Future for OneRequestServiceResponse<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Output = Result<(Request, Framed<T, Codec>), ParseError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project();
match ready!(this.framed.as_pin_mut().unwrap().next_item(cx)) {
Some(Ok(req)) => match req {
Message::Item(req) => {
let mut this = self.as_mut().project();
Poll::Ready(Ok((req, this.framed.take().unwrap())))
}
Message::Chunk(_) => unreachable!("Something is wrong"),
},
Some(Err(err)) => Poll::Ready(Err(err)),
None => Poll::Ready(Err(ParseError::Incomplete)),
}
fn call(&self, (io, addr): (T, Option<net::SocketAddr>)) -> Self::Future {
let conn_data = OnConnectData::from_io(&io, self.on_connect_ext.as_deref());
Dispatcher::new(io, self.flow.clone(), self.cfg.clone(), addr, conn_data)
}
}
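`new_service` now resolves the expect, upgrade, and main service factories inside a single boxed async block instead of the hand-rolled `H1ServiceResponse` future. A dependency-light sketch of that construction pattern, with illustrative types standing in for the real factories (only `futures` is assumed, for `block_on`):

use std::{future::Future, pin::Pin};

type LocalBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + 'a>>;

struct Handler {
    expect: u32,
    service: u32,
}

struct Factory;

impl Factory {
    fn new_service(&self) -> LocalBoxFuture<'static, Result<Handler, ()>> {
        // pretend these are inner service factories being resolved
        let expect = async { Ok::<_, ()>(1u32) };
        let service = async { Ok::<_, ()>(2u32) };

        Box::pin(async move {
            // await each inner factory in turn, logging and bailing on init errors
            let expect = expect.await.map_err(|e| eprintln!("init expect error: {:?}", e))?;
            let service = service.await.map_err(|e| eprintln!("init service error: {:?}", e))?;
            Ok(Handler { expect, service })
        })
    }
}

fn main() {
    let fut = Factory.new_service();
    let handler = futures::executor::block_on(fut).unwrap();
    assert_eq!((handler.expect, handler.service), (1, 2));
}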


@ -1,41 +1,32 @@
use std::marker::PhantomData;
use std::task::{Context, Poll};
use actix_codec::Framed;
use actix_service::{Service, ServiceFactory};
use futures_util::future::Ready;
use futures_core::future::LocalBoxFuture;
use crate::error::Error;
use crate::h1::Codec;
use crate::request::Request;
use crate::{h1::Codec, Error, Request};
pub struct UpgradeHandler<T>(PhantomData<T>);
pub struct UpgradeHandler;
impl<T> ServiceFactory for UpgradeHandler<T> {
type Config = ();
type Request = (Request, Framed<T, Codec>);
impl<T> ServiceFactory<(Request, Framed<T, Codec>)> for UpgradeHandler {
type Response = ();
type Error = Error;
type Service = UpgradeHandler<T>;
type Config = ();
type Service = UpgradeHandler;
type InitError = Error;
type Future = Ready<Result<Self::Service, Self::InitError>>;
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
unimplemented!()
}
}
impl<T> Service for UpgradeHandler<T> {
type Request = (Request, Framed<T, Codec>);
impl<T> Service<(Request, Framed<T, Codec>)> for UpgradeHandler {
type Response = ();
type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
actix_service::always_ready!();
fn call(&mut self, _: Self::Request) -> Self::Future {
fn call(&self, _: (Request, Framed<T, Codec>)) -> Self::Future {
unimplemented!()
}
}


@ -1,27 +1,35 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use pin_project_lite::pin_project;
use crate::body::{BodySize, MessageBody, ResponseBody};
use crate::error::Error;
use crate::h1::{Codec, Message};
use crate::response::Response;
use crate::{
body::{BodySize, MessageBody},
h1::{Codec, Message},
Error, Response,
};
/// Send HTTP/1 response
#[pin_project::pin_project]
pub struct SendResponse<T, B> {
res: Option<Message<(Response<()>, BodySize)>>,
#[pin]
body: Option<ResponseBody<B>>,
#[pin]
framed: Option<Framed<T, Codec>>,
pin_project! {
/// Send HTTP/1 response
pub struct SendResponse<T, B> {
res: Option<Message<(Response<()>, BodySize)>>,
#[pin]
body: Option<B>,
#[pin]
framed: Option<Framed<T, Codec>>,
}
}
impl<T, B> SendResponse<T, B>
where
B: MessageBody,
B::Error: Into<Error>,
{
pub fn new(framed: Framed<T, Codec>, response: Response<B>) -> Self {
let (res, body) = response.into_parts();
@ -37,7 +45,8 @@ where
impl<T, B> Future for SendResponse<T, B>
where
T: AsyncRead + AsyncWrite + Unpin,
B: MessageBody + Unpin,
B: MessageBody,
B::Error: Into<Error>,
{
type Output = Result<Framed<T, Codec>, Error>;
@ -60,15 +69,24 @@ where
.unwrap()
.is_write_buf_full()
{
match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx)? {
let next = match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx) {
Poll::Ready(Some(Ok(item))) => Poll::Ready(Some(item)),
Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err.into())),
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
};
match next {
Poll::Ready(item) => {
// body is done when item is None
body_done = item.is_none();
if body_done {
let _ = this.body.take();
this.body.set(None);
}
let framed = this.framed.as_mut().as_pin_mut().unwrap();
framed.write(Message::Chunk(item))?;
framed
.write(Message::Chunk(item))
.map_err(|err| Error::new_send_response().with_cause(err))?;
}
Poll::Pending => body_ready = false,
}
@ -79,7 +97,10 @@ where
// flush write buffer
if !framed.is_write_buf_empty() {
match framed.flush(cx)? {
match framed
.flush(cx)
.map_err(|err| Error::new_send_response().with_cause(err))?
{
Poll::Ready(_) => {
if body_ready {
continue;
@ -93,7 +114,9 @@ where
// send response
if let Some(res) = this.res.take() {
framed.write(res)?;
framed
.write(res)
.map_err(|err| Error::new_send_response().with_cause(err))?;
continue;
}
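`SendResponse` buffers body chunks into the framed codec until its write buffer is full, flushes, and repeats until the body ends. The same high-watermark pattern can be sketched with std I/O only (the names and the 8 KiB threshold below are illustrative, not values from this crate):

use std::io::{self, Write};

const HIGH_WATERMARK: usize = 8 * 1024;

fn send_body<W: Write>(dst: &mut W, chunks: &[&[u8]]) -> io::Result<()> {
    let mut buf: Vec<u8> = Vec::new();

    for chunk in chunks {
        buf.extend_from_slice(chunk);

        // buffer is "full": flush before buffering more, mirroring is_write_buf_full()
        if buf.len() >= HIGH_WATERMARK {
            dst.write_all(&buf)?;
            dst.flush()?;
            buf.clear();
        }
    }

    // flush whatever is left once the body is done
    if !buf.is_empty() {
        dst.write_all(&buf)?;
        dst.flush()?;
    }

    Ok(())
}

fn main() -> io::Result<()> {
    let mut out = Vec::new();
    send_body(&mut out, &[b"hello, ", b"world"])?;
    assert_eq!(out, b"hello, world");
    Ok(())
}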


@ -1,363 +1,334 @@
use std::convert::TryFrom;
use std::future::Future;
use std::marker::PhantomData;
use std::net;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{
cmp,
error::Error as StdError,
future::Future,
marker::PhantomData,
net,
pin::Pin,
rc::Rc,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::{Delay, Instant};
use actix_rt::time::{sleep, Sleep};
use actix_service::Service;
use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut};
use h2::server::{Connection, SendResponse};
use h2::SendStream;
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
use futures_core::ready;
use h2::{
server::{Connection, SendResponse},
Ping, PingPong,
};
use log::{error, trace};
use pin_project_lite::pin_project;
use crate::body::{BodySize, MessageBody, ResponseBody};
use crate::cloneable::CloneableService;
use crate::config::ServiceConfig;
use crate::error::{DispatchError, Error};
use crate::helpers::DataFactory;
use crate::httpmessage::HttpMessage;
use crate::message::ResponseHead;
use crate::payload::Payload;
use crate::request::Request;
use crate::response::Response;
use crate::{
body::{BodySize, BoxBody, MessageBody},
config::ServiceConfig,
header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING},
service::HttpFlow,
Extensions, OnConnectData, Payload, Request, Response, ResponseHead,
};
const CHUNK_SIZE: usize = 16_384;
/// Dispatcher for HTTP/2 protocol
#[pin_project::pin_project]
pub struct Dispatcher<T, S: Service<Request = Request>, B: MessageBody>
where
T: AsyncRead + AsyncWrite + Unpin,
{
service: CloneableService<S>,
connection: Connection<T, Bytes>,
on_connect: Option<Box<dyn DataFactory>>,
config: ServiceConfig,
peer_addr: Option<net::SocketAddr>,
ka_expire: Instant,
ka_timer: Option<Delay>,
_t: PhantomData<B>,
pin_project! {
/// Dispatcher for HTTP/2 protocol.
pub struct Dispatcher<T, S, B, X, U> {
flow: Rc<HttpFlow<S, X, U>>,
connection: Connection<T, Bytes>,
conn_data: Option<Rc<Extensions>>,
config: ServiceConfig,
peer_addr: Option<net::SocketAddr>,
ping_pong: Option<H2PingPong>,
_phantom: PhantomData<B>
}
}
impl<T, S, B> Dispatcher<T, S, B>
impl<T, S, B, X, U> Dispatcher<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error>,
// S::Future: 'static,
S::Response: Into<Response<B>>,
B: MessageBody,
{
pub(crate) fn new(
service: CloneableService<S>,
connection: Connection<T, Bytes>,
on_connect: Option<Box<dyn DataFactory>>,
mut conn: Connection<T, Bytes>,
flow: Rc<HttpFlow<S, X, U>>,
config: ServiceConfig,
timeout: Option<Delay>,
peer_addr: Option<net::SocketAddr>,
conn_data: OnConnectData,
timer: Option<Pin<Box<Sleep>>>,
) -> Self {
// let keepalive = config.keep_alive_enabled();
// let flags = if keepalive {
// Flags::KEEPALIVE | Flags::KEEPALIVE_ENABLED
// } else {
// Flags::empty()
// };
let ping_pong = config.keep_alive().map(|dur| H2PingPong {
timer: timer
.map(|mut timer| {
// reset the timer if one was passed in from the `new` function
timer.as_mut().reset(config.now() + dur);
timer
})
.unwrap_or_else(|| Box::pin(sleep(dur))),
on_flight: false,
ping_pong: conn.ping_pong().unwrap(),
});
// keep-alive timer
let (ka_expire, ka_timer) = if let Some(delay) = timeout {
(delay.deadline(), Some(delay))
} else if let Some(delay) = config.keep_alive_timer() {
(delay.deadline(), Some(delay))
} else {
(config.now(), None)
};
Dispatcher {
service,
Self {
flow,
config,
peer_addr,
connection,
on_connect,
ka_expire,
ka_timer,
_t: PhantomData,
connection: conn,
conn_data: conn_data.0.map(Rc::new),
ping_pong,
_phantom: PhantomData,
}
}
}
impl<T, S, B> Future for Dispatcher<T, S, B>
struct H2PingPong {
timer: Pin<Box<Sleep>>,
on_flight: bool,
ping_pong: PingPong,
}
impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S: Service<Request>,
S::Error: Into<Response<BoxBody>>,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
S::Response: Into<Response<B>>,
B: MessageBody,
{
type Output = Result<(), DispatchError>;
type Output = Result<(), crate::error::DispatchError>;
#[inline]
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
loop {
match Pin::new(&mut this.connection).poll_accept(cx) {
Poll::Ready(None) => return Poll::Ready(Ok(())),
Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err.into())),
Poll::Ready(Some(Ok((req, res)))) => {
// update keep-alive expire
if this.ka_timer.is_some() {
if let Some(expire) = this.config.keep_alive_expire() {
this.ka_expire = expire;
}
}
match Pin::new(&mut this.connection).poll_accept(cx)? {
Poll::Ready(Some((req, tx))) => {
let (parts, body) = req.into_parts();
let mut req = Request::with_payload(Payload::<
crate::payload::PayloadStream,
>::H2(
crate::h2::Payload::new(body)
));
let payload = crate::h2::Payload::new(body);
let pl = Payload::H2 { payload };
let mut req = Request::with_payload(pl);
let head = &mut req.head_mut();
let head = req.head_mut();
head.uri = parts.uri;
head.method = parts.method;
head.version = parts.version;
head.headers = parts.headers.into();
head.peer_addr = this.peer_addr;
// set on_connect data
if let Some(ref on_connect) = this.on_connect {
on_connect.set(&mut req.extensions_mut());
}
req.conn_data = this.conn_data.as_ref().map(Rc::clone);
actix_rt::spawn(ServiceResponse::<
S::Future,
S::Response,
S::Error,
B,
> {
state: ServiceResponseState::ServiceCall(
this.service.call(req),
Some(res),
),
config: this.config.clone(),
buffer: None,
_t: PhantomData,
let fut = this.flow.service.call(req);
let config = this.config.clone();
// multiplex request handling with a spawned task
actix_rt::spawn(async move {
// resolve service call and send response.
let res = match fut.await {
Ok(res) => handle_response(res.into(), tx, config).await,
Err(err) => {
let res: Response<BoxBody> = err.into();
handle_response(res, tx, config).await
}
};
// log error.
if let Err(err) = res {
match err {
DispatchError::SendResponse(err) => {
trace!("Error sending HTTP/2 response: {:?}", err)
}
DispatchError::SendData(err) => warn!("{:?}", err),
DispatchError::ResponseBody(err) => {
error!("Response payload stream error: {:?}", err)
}
}
}
});
}
Poll::Pending => return Poll::Pending,
}
}
}
}
Poll::Ready(None) => return Poll::Ready(Ok(())),
Poll::Pending => match this.ping_pong.as_mut() {
Some(ping_pong) => loop {
if ping_pong.on_flight {
// When there is an in-flight ping-pong, poll the pong and the keep-alive timer.
// On a successful pong, update the keep-alive timer to determine the timing of
// the next ping-pong.
match ping_pong.ping_pong.poll_pong(cx)? {
Poll::Ready(_) => {
ping_pong.on_flight = false;
#[pin_project::pin_project]
struct ServiceResponse<F, I, E, B> {
#[pin]
state: ServiceResponseState<F, B>,
config: ServiceConfig,
buffer: Option<Bytes>,
_t: PhantomData<(I, E)>,
}
#[pin_project::pin_project(project = ServiceResponseStateProj)]
enum ServiceResponseState<F, B> {
ServiceCall(#[pin] F, Option<SendResponse<Bytes>>),
SendPayload(SendStream<Bytes>, #[pin] ResponseBody<B>),
}
impl<F, I, E, B> ServiceResponse<F, I, E, B>
where
F: Future<Output = Result<I, E>>,
E: Into<Error>,
I: Into<Response<B>>,
B: MessageBody,
{
fn prepare_response(
&self,
head: &ResponseHead,
size: &mut BodySize,
) -> http::Response<()> {
let mut has_date = false;
let mut skip_len = size != &BodySize::Stream;
let mut res = http::Response::new(());
*res.status_mut() = head.status;
*res.version_mut() = http::Version::HTTP_2;
// Content length
match head.status {
http::StatusCode::NO_CONTENT
| http::StatusCode::CONTINUE
| http::StatusCode::PROCESSING => *size = BodySize::None,
http::StatusCode::SWITCHING_PROTOCOLS => {
skip_len = true;
*size = BodySize::Stream;
}
_ => (),
}
let _ = match size {
BodySize::None | BodySize::Stream => None,
BodySize::Empty => res
.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0")),
BodySize::Sized(len) => res.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
};
// copy headers
for (key, value) in head.headers.iter() {
match *key {
CONNECTION | TRANSFER_ENCODING => continue, // http2 specific
CONTENT_LENGTH if skip_len => continue,
DATE => has_date = true,
_ => (),
}
res.headers_mut().append(key, value.clone());
}
// set date header
if !has_date {
let mut bytes = BytesMut::with_capacity(29);
self.config.set_date_header(&mut bytes);
res.headers_mut().insert(
DATE,
// SAFETY: serialized date-times are known ASCII strings
unsafe { HeaderValue::from_maybe_shared_unchecked(bytes.freeze()) },
);
}
res
}
}
impl<F, I, E, B> Future for ServiceResponse<F, I, E, B>
where
F: Future<Output = Result<I, E>>,
E: Into<Error>,
I: Into<Response<B>>,
B: MessageBody,
{
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
match this.state.project() {
ServiceResponseStateProj::ServiceCall(call, send) => match call.poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res = self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
this.state
.set(ServiceResponseState::SendPayload(stream, body));
self.poll(cx)
}
}
Poll::Pending => Poll::Pending,
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res = self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
this.state.set(ServiceResponseState::SendPayload(
stream,
body.into_body(),
));
self.poll(cx)
}
}
},
ServiceResponseStateProj::SendPayload(ref mut stream, ref mut body) => {
loop {
loop {
if let Some(ref mut buffer) = this.buffer {
match stream.poll_capacity(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => return Poll::Ready(()),
Poll::Ready(Some(Ok(cap))) => {
let len = buffer.len();
let bytes = buffer.split_to(std::cmp::min(cap, len));
if let Err(e) = stream.send_data(bytes, false) {
warn!("{:?}", e);
return Poll::Ready(());
} else if !buffer.is_empty() {
let cap =
std::cmp::min(buffer.len(), CHUNK_SIZE);
stream.reserve_capacity(cap);
} else {
this.buffer.take();
}
let dead_line = this.config.keep_alive_expire().unwrap();
ping_pong.timer.as_mut().reset(dead_line);
}
Poll::Ready(Some(Err(e))) => {
warn!("{:?}", e);
return Poll::Ready(());
Poll::Pending => {
return ping_pong.timer.as_mut().poll(cx).map(|_| Ok(()))
}
}
} else {
match body.as_mut().poll_next(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => {
if let Err(e) = stream.send_data(Bytes::new(), true)
{
warn!("{:?}", e);
}
return Poll::Ready(());
}
Poll::Ready(Some(Ok(chunk))) => {
stream.reserve_capacity(std::cmp::min(
chunk.len(),
CHUNK_SIZE,
));
*this.buffer = Some(chunk);
}
Poll::Ready(Some(Err(e))) => {
error!("Response payload stream error: {:?}", e);
return Poll::Ready(());
}
}
// When there is no in-flight ping-pong, the keep-alive timer is used to wait for
// the next ping-pong timing; at this point it effectively serves as an interval.
ready!(ping_pong.timer.as_mut().poll(cx));
ping_pong.ping_pong.send_ping(Ping::opaque())?;
let dead_line = this.config.keep_alive_expire().unwrap();
ping_pong.timer.as_mut().reset(dead_line);
ping_pong.on_flight = true;
}
},
None => return Poll::Pending,
},
}
}
}
}
enum DispatchError {
SendResponse(h2::Error),
SendData(h2::Error),
ResponseBody(Box<dyn StdError>),
}
async fn handle_response<B>(
res: Response<B>,
mut tx: SendResponse<Bytes>,
config: ServiceConfig,
) -> Result<(), DispatchError>
where
B: MessageBody,
{
let (res, body) = res.replace_body(());
// prepare response.
let mut size = body.size();
let res = prepare_response(config, res.head(), &mut size);
let eof = size.is_eof();
// send response head and return on eof.
let mut stream = tx
.send_response(res, eof)
.map_err(DispatchError::SendResponse)?;
if eof {
return Ok(());
}
// poll response body and send chunks to client
actix_rt::pin!(body);
while let Some(res) = poll_fn(|cx| body.as_mut().poll_next(cx)).await {
let mut chunk = res.map_err(|err| DispatchError::ResponseBody(err.into()))?;
'send: loop {
let chunk_size = cmp::min(chunk.len(), CHUNK_SIZE);
// reserve enough space and wait for the stream to be ready
stream.reserve_capacity(chunk_size);
match poll_fn(|cx| stream.poll_capacity(cx)).await {
// No capacity left. drop body and return.
None => return Ok(()),
Some(Err(err)) => return Err(DispatchError::SendData(err)),
Some(Ok(cap)) => {
// split the chunk to a writable size and send it to the client
let len = chunk.len();
let bytes = chunk.split_to(cmp::min(len, cap));
stream
.send_data(bytes, false)
.map_err(DispatchError::SendData)?;
// Current chunk completely sent; break the send loop and poll the next one.
if chunk.is_empty() {
break 'send;
}
}
}
}
}
// response body streaming finished. send end of stream and return.
stream
.send_data(Bytes::new(), true)
.map_err(DispatchError::SendData)?;
Ok(())
}
fn prepare_response(
config: ServiceConfig,
head: &ResponseHead,
size: &mut BodySize,
) -> http::Response<()> {
let mut has_date = false;
let mut skip_len = size != &BodySize::Stream;
let mut res = http::Response::new(());
*res.status_mut() = head.status;
*res.version_mut() = http::Version::HTTP_2;
// Content length
match head.status {
http::StatusCode::NO_CONTENT
| http::StatusCode::CONTINUE
| http::StatusCode::PROCESSING => *size = BodySize::None,
http::StatusCode::SWITCHING_PROTOCOLS => {
skip_len = true;
*size = BodySize::Stream;
}
_ => {}
}
let _ = match size {
BodySize::None | BodySize::Stream => None,
BodySize::Sized(0) => {
#[allow(clippy::declare_interior_mutable_const)]
const HV_ZERO: HeaderValue = HeaderValue::from_static("0");
res.headers_mut().insert(CONTENT_LENGTH, HV_ZERO)
}
BodySize::Sized(len) => {
let mut buf = itoa::Buffer::new();
res.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::from_str(buf.format(*len)).unwrap(),
)
}
};
// copy headers
for (key, value) in head.headers.iter() {
match *key {
// TODO: consider skipping other headers according to:
// https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.2
// omit HTTP/1.x only headers
CONNECTION | TRANSFER_ENCODING => continue,
CONTENT_LENGTH if skip_len => continue,
DATE => has_date = true,
_ => {}
}
res.headers_mut().append(key, value.clone());
}
// set date header
if !has_date {
let mut bytes = BytesMut::with_capacity(29);
config.set_date_header(&mut bytes);
res.headers_mut().insert(
DATE,
// SAFETY: serialized date-times are known ASCII strings
unsafe { HeaderValue::from_maybe_shared_unchecked(bytes.freeze()) },
);
}
res
}
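`handle_response` streams each body chunk under HTTP/2 flow control: reserve capacity, wait for `poll_capacity`, then `send_data` only as much as was granted, looping until the chunk is drained. A hedged sketch of that loop as a free function, assuming the `h2`, `bytes`, and `futures` crates (`send_chunk` is an illustrative name, not actix-http API):

use bytes::Bytes;
use futures::future::poll_fn;
use h2::SendStream;

const CHUNK_SIZE: usize = 16_384;

async fn send_chunk(stream: &mut SendStream<Bytes>, mut chunk: Bytes) -> Result<(), h2::Error> {
    loop {
        // ask the peer for enough window to send the next slice
        stream.reserve_capacity(std::cmp::min(chunk.len(), CHUNK_SIZE));

        match poll_fn(|cx| stream.poll_capacity(cx)).await {
            // stream was reset; no more capacity will be granted
            None => return Ok(()),
            Some(Err(err)) => return Err(err),
            Some(Ok(cap)) => {
                // send as much as the granted capacity allows
                let len = chunk.len();
                let bytes = chunk.split_to(std::cmp::min(len, cap));
                stream.send_data(bytes, false)?;

                if chunk.is_empty() {
                    return Ok(());
                }
            }
        }
    }
}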


@ -1,50 +1,111 @@
//! HTTP/2 implementation
use std::pin::Pin;
use std::task::{Context, Poll};
//! HTTP/2 protocol.
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::Sleep;
use bytes::Bytes;
use futures_core::Stream;
use h2::RecvStream;
use futures_core::{ready, Stream};
use h2::{
server::{handshake, Connection, Handshake},
RecvStream,
};
mod dispatcher;
mod service;
pub use self::dispatcher::Dispatcher;
pub use self::service::H2Service;
use crate::error::PayloadError;
/// H2 receive stream
use crate::{
config::ServiceConfig,
error::{DispatchError, PayloadError},
};
/// HTTP/2 peer stream.
pub struct Payload {
pl: RecvStream,
stream: RecvStream,
}
impl Payload {
pub(crate) fn new(pl: RecvStream) -> Self {
Self { pl }
pub(crate) fn new(stream: RecvStream) -> Self {
Self { stream }
}
}
impl Stream for Payload {
type Item = Result<Bytes, PayloadError>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
match Pin::new(&mut this.pl).poll_data(cx) {
Poll::Ready(Some(Ok(chunk))) => {
match ready!(Pin::new(&mut this.stream).poll_data(cx)) {
Some(Ok(chunk)) => {
let len = chunk.len();
if let Err(err) = this.pl.flow_control().release_capacity(len) {
Poll::Ready(Some(Err(err.into())))
} else {
Poll::Ready(Some(Ok(chunk)))
match this.stream.flow_control().release_capacity(len) {
Ok(()) => Poll::Ready(Some(Ok(chunk))),
Err(err) => Poll::Ready(Some(Err(err.into()))),
}
}
Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(err.into()))),
Poll::Pending => Poll::Pending,
Poll::Ready(None) => Poll::Ready(None),
Some(Err(err)) => Poll::Ready(Some(Err(err.into()))),
None => Poll::Ready(None),
}
}
}
pub(crate) fn handshake_with_timeout<T>(
io: T,
config: &ServiceConfig,
) -> HandshakeWithTimeout<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
HandshakeWithTimeout {
handshake: handshake(io),
timer: config.client_timer().map(Box::pin),
}
}
pub(crate) struct HandshakeWithTimeout<T: AsyncRead + AsyncWrite + Unpin> {
handshake: Handshake<T>,
timer: Option<Pin<Box<Sleep>>>,
}
impl<T> Future for HandshakeWithTimeout<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Output = Result<(Connection<T, Bytes>, Option<Pin<Box<Sleep>>>), DispatchError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
match Pin::new(&mut this.handshake).poll(cx)? {
// return the timer on successful handshake; it can be re-used for the h2 ping-pong
Poll::Ready(conn) => Poll::Ready(Ok((conn, this.timer.take()))),
Poll::Pending => match this.timer.as_mut() {
Some(timer) => {
ready!(timer.as_mut().poll(cx));
Poll::Ready(Err(DispatchError::SlowRequestTimeout))
}
None => Poll::Pending,
},
}
}
}
#[cfg(test)]
mod tests {
use std::panic::{RefUnwindSafe, UnwindSafe};
use static_assertions::assert_impl_all;
use super::*;
assert_impl_all!(Payload: Unpin, Send, Sync, UnwindSafe, RefUnwindSafe);
}
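`HandshakeWithTimeout` races the h2 handshake against an optional client timer: the inner future is polled first, and only if it is pending is the timer polled, turning its expiry into an error. A generic sketch of that shape, assuming the `tokio` crate with time and macro features (`Deadline` is an illustrative name, not actix-http API):

use std::{
    future::Future,
    pin::Pin,
    task::{Context, Poll},
    time::Duration,
};

use tokio::time::{sleep, Sleep};

struct Deadline<F> {
    fut: Pin<Box<F>>,
    timer: Option<Pin<Box<Sleep>>>,
}

impl<F: Future> Future for Deadline<F> {
    type Output = Result<F::Output, &'static str>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.get_mut();

        match this.fut.as_mut().poll(cx) {
            // inner future finished first: pass its output through
            Poll::Ready(out) => Poll::Ready(Ok(out)),
            Poll::Pending => match this.timer.as_mut() {
                // only consult the deadline while the inner future is pending
                Some(timer) => match timer.as_mut().poll(cx) {
                    Poll::Ready(()) => Poll::Ready(Err("timed out")),
                    Poll::Pending => Poll::Pending,
                },
                None => Poll::Pending,
            },
        }
    }
}

#[tokio::main]
async fn main() {
    let slow = Deadline {
        fut: Box::pin(sleep(Duration::from_secs(5))),
        timer: Some(Box::pin(sleep(Duration::from_millis(10)))),
    };
    assert!(slow.await.is_err()); // the deadline fires first
}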
