mirror of https://github.com/fafhrd91/actix-web synced 2025-07-03 01:34:32 +02:00

Compare commits


660 Commits

Author SHA1 Message Date
fcca515387 prepare actix-multipart release 0.4.0 2022-02-25 20:41:57 +00:00
075932d823 prepare actix-web-actors release 4.0.0 2022-02-25 20:41:33 +00:00
cb379c0e0c prepare actix-files release 0.6.0 2022-02-25 20:36:16 +00:00
d4a5d450de prepare actix-web release 4.0.1 2022-02-25 20:31:46 +00:00
542200cbc2 update readme 2022-02-25 19:11:46 +00:00
d0c08dbb7d prepare releases: actix-http 3.0.0 and actix-web 4.0.0 (#2663) 2022-02-25 18:46:35 +00:00
d0b5fb18d2 update migration guide on middleware 2022-02-22 17:40:38 +00:00
12fb3412a5 remove concurrency groups 2022-02-22 12:52:07 +00:00
2665357a0c fix ci groups 2022-02-22 12:47:57 +00:00
693271e571 add CI job concurrency groups 2022-02-22 12:41:08 +00:00
10ef9b0751 remove useless doctest main fns 2022-02-22 12:32:06 +00:00
ce00c88963 fix changelog typo 2022-02-22 11:46:51 +00:00
75e6ffb057 prepare actix-router release 0.5.0 (#2658) 2022-02-22 11:38:25 +00:00
ad38973767 move blocking error to web (#2660) 2022-02-22 08:45:28 +00:00
1c1d6477ef remove legacy ws test 2022-02-22 07:11:16 +00:00
53509a5361 ignore all http1 connection headers in h2 2022-02-22 07:07:12 +00:00
a6f27baff1 flesh out Responder docs 2022-02-22 07:07:12 +00:00
218e34ee17 fix http error debug impl 2022-02-22 07:07:12 +00:00
11bfa84926 rename simple_service to status_service (#2659) 2022-02-22 07:06:36 +00:00
5aa6f713c7 update errorhandlers migration guide 2022-02-22 06:23:01 +00:00
151a15da74 prepare actix-http release 3.0.0-rc.4 2022-02-22 00:21:49 +00:00
1ce58ecb30 fix dispatcher panic on pending flush
fixes thread panic in actix-http-3.0.0-rc.3 #2655
2022-02-22 00:19:48 +00:00
f940653981 Edits to the migration notes (#2654) 2022-02-19 17:05:54 +00:00
b291e29882 fix links 2022-02-18 03:41:10 +00:00
f843776f36 Fix links in README (#2653) 2022-02-18 03:34:12 +00:00
52f7d96358 tweak migration document 2022-02-17 19:13:03 +00:00
51e573b888 prepare actix-test release 0.1.0-beta.13 2022-02-16 03:13:41 +00:00
38e015432b prepare actix-http-test release 3.0.0-beta.13 2022-02-16 03:13:22 +00:00
f5895d5eff prepare actix-web-actors release 4.0.0-beta.12 2022-02-16 03:11:22 +00:00
a0c4bf8d1b prepare awc release 3.0.0-beta.21 2022-02-16 03:10:01 +00:00
594e3a6ef1 prepare actix-http release 3.0.0-rc.3 2022-02-16 03:07:12 +00:00
a808a26d8c bump actix-codec to 0.5 2022-02-15 20:49:10 +00:00
de62e8b025 add nextest to post-merge ci 2022-02-15 14:40:26 +00:00
3486edabcf update migrations guide re tokio v1 2022-02-15 00:54:12 +00:00
4c59a34513 Remove clone implementation for Path (#2639) 2022-02-10 10:29:00 +00:00
1b706b3069 update body type migration guide 2022-02-09 16:12:39 +00:00
a9f445875a update migration guide 2022-02-09 12:31:06 +00:00
e0f02c1d9e update migration guide 2022-02-08 16:53:09 +00:00
092dbba5b9 update migration guide 2022-02-08 15:24:35 +00:00
ff4b2d251f fix impl assertions 2022-02-08 14:32:57 +00:00
98faa61afe fix impl assertions 2022-02-08 13:37:01 +00:00
3f2db9e75c fix doc tests 2022-02-08 12:25:13 +00:00
074d18209d better document relationship with tokio 2022-02-08 10:21:47 +00:00
593fbde46a prepare actix-web release 4.0.0-rc.3 2022-02-08 09:31:48 +00:00
161861997c prepare actix-http release 3.0.0-rc.2 2022-02-08 09:31:20 +00:00
3d621677a5 clippy 2022-02-08 08:00:47 +00:00
0c144054cb make Condition generic over body type (#2635)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-02-08 07:50:05 +00:00
b0fbe0dfd8 fix workers doc 2022-02-08 06:58:33 +00:00
b653bf557f added note to v4 migration guide about worker thread update (#2634) 2022-02-07 19:04:03 +00:00
1d1a65282f RC refinements (#2625) 2022-02-04 20:37:33 +00:00
b0a363a7ae add migration note about fromrequest::configure 2022-02-04 18:48:22 +00:00
b4d3c2394d clean up migration guide 2022-02-04 18:22:38 +00:00
5ca42df89a fix stuck connection when handler doesn't read payload (#2624) 2022-02-03 07:03:39 +00:00
fc5ecdc30b fix changelog 2022-02-02 03:55:43 +00:00
7fe800c3ff prepare actix-web release 4.0.0-rc.2 2022-02-02 03:54:26 +00:00
075df88a07 update 4.0 migration guide 2022-02-02 03:42:07 +00:00
391d8a744a update 4.0 migratio guide 2022-02-02 03:13:11 +00:00
5b6cb681b9 update 4.0 migration guide 2022-02-02 03:09:33 +00:00
0957ec40b4 split migration file 2022-02-02 02:46:37 +00:00
ccf430d74a disable coverage job 2022-02-01 15:24:35 +00:00
c84c1f0f15 simplify macros feature 2022-02-01 14:39:49 +00:00
e9279dfbb8 Fix deprecated notice about client_shutdown (#2621) 2022-02-01 13:44:56 +00:00
a68239adaa bump zstd to 0.10 2022-02-01 13:35:32 +00:00
40a4b1ccd5 add macro feature (#2619)
Co-authored-by: Ibraheem Ahmed <ibrah1440@gmail.com>
2022-02-01 02:35:05 +00:00
7f5a8c0851 fix vmanifest 2022-02-01 00:33:41 +00:00
bcdde1d4ea move actix-web to own dir 2022-02-01 00:30:41 +00:00
30aa64ea32 update dep graphs 2022-02-01 00:23:58 +00:00
5469b02638 prepare actix-web-codegen release 0.5.0-rc.2 2022-02-01 00:12:42 +00:00
a66cd38ec5 prepare actix-web-actors release 4.0.0-beta.11 2022-01-31 22:35:18 +00:00
20609e93fd prepare actix-test release 0.1.0-beta.12 2022-01-31 22:34:59 +00:00
bf282472ab prepare actix-http-test release 3.0.0-beta.12 2022-01-31 22:33:38 +00:00
7f4b44c258 prepare actix-multipart release 0.4.0-beta.13 2022-01-31 22:33:11 +00:00
66243717b3 prepare actix-files release 0.6.0-beta.16 2022-01-31 22:32:52 +00:00
102720d398 prepare awc release 3.0.0-beta.20 2022-01-31 22:32:09 +00:00
c3c7eb8df9 prepare actix-web release 4.0.0-rc.1 2022-01-31 22:23:33 +00:00
21f57caf4a prepare actix-http release 3.0.0-rc.1 2022-01-31 22:22:40 +00:00
47f5faf26e prepare actix-router release 0.5.0-rc.3 2022-01-31 22:21:30 +00:00
9777653dc0 prep readme for rc release 2022-01-31 22:20:53 +00:00
9fde5b30db tweak and document router (#2612)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-01-31 22:12:48 +00:00
fd412a8223 Quoter::requote returns Vec<u8> (#2613) 2022-01-31 21:26:34 +00:00
cd511affd5 add ws and http2 feature flags (#2618) 2022-01-31 21:22:23 +00:00
3200de3f34 fix request head timeout (#2611) 2022-01-31 17:30:34 +00:00
b3e84b5c4b tweak default_service docs 2022-01-28 20:53:51 +00:00
a3416112a5 Improve the documentation for default_service (#2614) 2022-01-28 20:31:54 +00:00
21a08ca796 tweak set_content_encoding docs 2022-01-28 20:27:16 +00:00
a9f497d05f Guard against broken intra-doc links in CI (#2616) 2022-01-28 17:28:16 +00:00
cc9ba162f7 add late request dispatcher test 2022-01-27 17:00:07 +00:00
37799df978 add basic dispatcher test 2022-01-27 06:42:54 +00:00
0d93a8c273 add examples readme 2022-01-27 06:32:28 +00:00
3ae4f0a629 add keep-alive dispatcher tests 2022-01-27 06:29:46 +00:00
14a4f325d3 move dispatcher tests to own file 2022-01-27 06:06:55 +00:00
1bd2076b35 prevent drive traversal in windows 2022-01-25 16:44:05 +00:00
5454699bab propagate response error in all necessary places 2022-01-24 11:56:23 +00:00
d7c5c966d2 remove impl Future for HttpResponse (#2601) 2022-01-24 11:56:01 +00:00
50894e392e document new body map types 2022-01-23 23:26:35 +00:00
008753f07a improve body docs 2022-01-23 03:57:08 +00:00
c92aa31f91 document full percent-decoding of web::Path 2022-01-22 16:17:46 +00:00
c25dd23820 move path impls to derives 2022-01-22 04:02:34 +00:00
acacb90b2e add actix-http 2.2.2 changelog 2022-01-21 21:24:09 +00:00
8459f566a8 fix brotli encoding buffer size 2022-01-21 21:17:07 +00:00
232a14dc8b prepare actix-files release 0.6.0-beta.15 2022-01-21 20:27:29 +00:00
6e9f5fba24 prepare awc release 3.0.0-beta.19 2022-01-21 20:25:46 +00:00
c5d6df0078 prepare actix-web release 4.0.0-beta.21 2022-01-21 20:23:29 +00:00
8865540f3b prepare actix-http release 3.0.0-beta.19 2022-01-21 20:21:49 +00:00
141790b200 use camel case in special headers
fixes #2595
2022-01-21 20:15:43 +00:00
9668a2396f prepare actix-router release 0.5.0-rc.2 2022-01-21 17:21:46 +00:00
cb7347216c add Logger::log_target (#2594) 2022-01-21 17:19:17 +00:00
ae7f71e317 remove ambiguous HttpResponseBuilder::del_cookie (#2591) 2022-01-21 17:18:07 +00:00
bc89f0bfc2 s/example/examples 2022-01-21 16:56:33 +00:00
c959916346 fmt codegen 2022-01-20 01:54:57 +00:00
f227e880d7 refactor route codegen to be cleaner 2022-01-20 01:53:02 +00:00
68ad81f989 remove debug logs 2022-01-20 01:30:33 +00:00
f2e736719a add url_for test for conflicting named resources 2022-01-20 01:30:33 +00:00
81ef12a0fd add warn log to from_parts if given request is cloned
closes #2562
2022-01-19 22:23:53 +00:00
1bc1538118 use tokio::main in client example 2022-01-19 21:36:14 +00:00
1cc3e7b24c deprecate Path::path (#2590) 2022-01-19 20:26:33 +00:00
3dd98c308c document Path::unprocessed panic 2022-01-19 18:33:23 +00:00
cb5d9a7e64 bump deps to stable actix-server v2 2022-01-19 16:58:11 +00:00
5ee555462f add HttpResponse::add_removal_cookie (#2586) 2022-01-19 16:36:11 +00:00
ad159f5219 fix ClientResponse::body doc
fixes #2589
2022-01-19 15:52:16 +00:00
2ffc21dd4f move response extensions out of head (#2585) 2022-01-19 02:09:25 +00:00
7b8a392ef5 allow camel case response headers (#2587) 2022-01-16 03:16:26 +00:00
3c7ccf5521 update http changelog 2022-01-15 15:43:18 +00:00
e7cae5a95b migrate to brotli crate (#2538) 2022-01-15 14:03:16 +00:00
455d5c460d prepare actix-files release 0.6.0-beta.14 2022-01-14 20:01:11 +00:00
8faca783fa prepare actix-web release 4.0.0-beta.20 2022-01-14 20:00:26 +00:00
edbb9b047e prepare actix-router release 0.5.0-rc.1 2022-01-14 19:59:36 +00:00
32742d0715 support opaque app in test helpers (#2584) 2022-01-14 19:45:32 +00:00
d90c1a2331 convert error in Result extractor (#2581)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-01-12 18:59:22 +00:00
2a12b41456 fix support for 12 extractors (#2582)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-01-12 18:31:48 +00:00
6c97d448b7 update tokio-uring to 0.2 (#2583) 2022-01-12 17:53:36 +00:00
c3ce33df05 unify generics across App, Scope and Resource (#2572)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-01-05 15:02:28 +00:00
4431c8da65 fix bench 2022-01-05 14:10:38 +00:00
2d11ab5977 Add ServiceConfig::configure (#1988)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-01-05 12:31:39 +00:00
4ebf16890d add GuardContext::header (#2569) 2022-01-05 11:47:14 +00:00
fe0bbfb3da optimize PathDeserializer (#2570) 2022-01-05 10:48:20 +00:00
2462b6dd5d generalize impl Responder for HttpResponse (#2567)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-01-05 04:42:52 +00:00
49cfabeaf5 simplify Resource trait (#2568)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-01-05 04:34:13 +00:00
0f7292c69a remove readme msrv link 2022-01-05 04:24:40 +00:00
8bbf2b5052 prepare actix-test release 0.1.0-beta.11 2022-01-04 15:37:48 +00:00
8c975bcc1f prepare actix-http-test release 3.0.0-beta.11 2022-01-04 15:37:33 +00:00
742ad56d30 prepare actix-web-actors release 4.0.0-beta.10 2022-01-04 15:37:14 +00:00
bcc8d5c441 prepare actix-multipart release 0.4.0-beta.12 2022-01-04 15:36:56 +00:00
f659098d21 prepare awc release 3.0.0-beta.18 2022-01-04 15:35:21 +00:00
8621ae12f8 prepare actix-web release 4.0.0-beta.19 2022-01-04 15:35:08 +00:00
b338eb8473 prepare actix-http release 3.0.0-beta.18 2022-01-04 15:34:52 +00:00
5abd1c2c2c prepare actix-web-codegen release 0.5.0-rc.1 2022-01-04 15:34:16 +00:00
05336269f9 prepare actix-router release 0.5.0-beta.4 2022-01-04 15:33:44 +00:00
86df295ee2 fully percent decode path segments when capturing (#2566) 2022-01-04 15:19:29 +00:00
85c9b1a263 move quoter 2022-01-04 12:58:40 +00:00
577597a80a rename on-connect example 2022-01-04 12:54:20 +00:00
374dc9bfc9 files: percent-decode url path (#2398)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2022-01-04 12:54:11 +00:00
93754f307f try path config from Data as well 2022-01-04 04:08:46 +00:00
c7639bc3be document quoter 2022-01-04 03:48:12 +00:00
0bc4ae9158 remove BodyEncoding trait (#2565) 2022-01-03 18:46:04 +00:00
19a46e3925 fix doc test 2022-01-03 15:35:47 +00:00
68cd853aa2 improve docs for Compress 2022-01-03 14:59:01 +00:00
25fe1bbaa5 add double compress layer test 2022-01-03 14:05:08 +00:00
e890307091 Fix AcceptEncoding header (#2501) 2022-01-03 13:17:57 +00:00
b708924590 only run nightly checks on master ci 2021-12-31 08:38:58 +00:00
5dcb250237 fix doc test 2021-12-31 07:53:53 +00:00
b4ff6addfe use match name if possible in data debug log 2021-12-30 07:15:57 +00:00
231a24ef8d improve application data docs 2021-12-30 07:11:35 +00:00
6df4974234 prepare awc release 3.0.0-beta.17 2021-12-29 10:17:28 +00:00
a80e93d6db prepare actix-web release 4.0.0-beta.18 2021-12-29 10:17:11 +00:00
542c92c9a7 tweak changelogs 2021-12-29 10:06:36 +00:00
74738c63a7 Upgrade time dependency (via cookie) (#2555) 2021-12-29 10:03:25 +00:00
a87e01f0d1 bump msrv to 1.54 2021-12-29 08:59:15 +00:00
9779010a5a prepare actix-files release 0.6.0-beta.12 2021-12-29 07:08:10 +00:00
11d50d792b prepare actix-web release 4.0.0-beta.17 2021-12-29 07:07:51 +00:00
798e9911e9 prepare awc release 3.0.0-beta.16 2021-12-29 07:07:46 +00:00
2b2de29800 never return port in realip_remote_addr (#2554) 2021-12-28 14:52:43 +00:00
0f5c876c6b tweak guard docs 2021-12-28 14:50:48 +00:00
96a4dc9dec use modern signatures for awc send_* and header methods (#2553) 2021-12-28 03:22:22 +00:00
4616ca8ee6 rework Guard trait (#2552) 2021-12-28 02:37:13 +00:00
36193b0a50 specify tokio dep to avoid RUSTSEC-2021-0124 warning 2021-12-27 18:54:10 +00:00
76684a786e update server dep to rc2 (#2550) 2021-12-27 18:45:31 +00:00
2308f8afa4 use const header values where possible 2021-12-27 16:15:33 +00:00
554ae7a868 rework Handler trait (#2549) 2021-12-27 00:44:30 +00:00
ac0c4eb684 update actix-tls references to stable 3.0.0 2021-12-26 21:24:03 +00:00
2e493cf791 remove crate level clippy allows 2021-12-25 04:53:51 +00:00
5860fe5381 expose Handler trait 2021-12-25 04:43:59 +00:00
adf9935841 improve scope documentation
closes #2389
2021-12-25 03:44:09 +00:00
34e5c7c799 Improve module docs for error handler middleware (#2543) 2021-12-25 02:35:19 +00:00
01cbfc5724 reduce -http re-exports in awc 2021-12-25 02:34:35 +00:00
3756dfc2ce move client to own module 2021-12-25 02:34:31 +00:00
d2590fd46c ClientRequest::send_body takes impl MessageBody (#2546) 2021-12-25 02:33:37 +00:00
1296e07c48 relax unpin bounds on payload types (#2545) 2021-12-24 17:47:47 +00:00
7b1512d863 allow any body type in Scope (#2523) 2021-12-22 15:48:59 +00:00
cd025f5c0b allow any body type in Resource (#2526) 2021-12-22 15:00:32 +00:00
1769812d0b bump outdated deps 2021-12-22 08:43:38 +00:00
324eba7e0b tighten tokio version range to prevent RUSTSEC-2021-0124 2021-12-22 08:41:44 +00:00
b3ac918d70 update itoa to v1 2021-12-22 08:34:48 +00:00
de20d21703 use dash hyphenation in markdown 2021-12-22 08:21:30 +00:00
212c6926f9 Revert "use dash hyphenation in changelogs"
This reverts commit 1ea619f2a1.
2021-12-22 08:18:44 +00:00
1ea619f2a1 use dash hyphenation in changelogs 2021-12-22 08:17:35 +00:00
40a0162074 add tests to scope and resource for returning from fns 2021-12-22 07:58:37 +00:00
f8488aff1e upstream changelog for v3.3.3 2021-12-22 07:20:53 +00:00
64c2e5e1cd remove crate level clippy lint 2021-12-22 07:16:07 +00:00
17f636a183 split request and response modules (#2530) 2021-12-19 17:05:27 +00:00
2e00776d5e Update FUNDING.yml 2021-12-19 04:18:57 +00:00
7d507a41ee Create FUNDING.yml 2021-12-19 03:58:29 +00:00
fb036264cc only build SslConnectorBuilder once (#2503) 2021-12-18 16:44:30 +00:00
d2b9724010 update bump script to detect prerelease versions 2021-12-18 03:27:32 +00:00
5c53db1e4d remove hidden anybody 2021-12-18 01:48:16 +00:00
84ea9e7e88 http: Replace header::map::GetAll with std::slice::Iter (#2527) 2021-12-18 00:05:12 +00:00
0bd5ccc432 update changelog 2021-12-17 21:39:15 +00:00
9cd8526085 prepare actix-router release 0.5.0-beta.3 2021-12-17 21:24:34 +00:00
73bbe56971 prepare actix-test release 0.1.0-beta.9 2021-12-17 21:00:15 +00:00
8340b63b7b prepare awc release 3.0.0-beta.14 2021-12-17 20:59:59 +00:00
6c2c7b68e2 prepare actix-web release 4.0.0-beta.15 2021-12-17 20:59:01 +00:00
7bf47967cc prepare actix-http release 3.0.0-beta.16 2021-12-17 20:57:51 +00:00
ae47d96fc6 use body::None in encoder body 2021-12-17 20:56:54 +00:00
5842a3279d update messagebody documentation 2021-12-17 19:35:08 +00:00
1d6f5ba6d6 improve codegen on BoxBody poll_next 2021-12-17 19:19:21 +00:00
aa31086af5 improve BoxBody Debug impl 2021-12-17 19:16:42 +00:00
57ea322ce5 simplify MessageBody::complete_body interface (#2522) 2021-12-17 19:09:08 +00:00
2cf27863cb remove direct dep on pin-project in -http (#2524) 2021-12-17 14:13:54 +00:00
5359fa56c2 include source for dispatch body errors 2021-12-17 01:29:41 +00:00
a2467718ac passthrough StreamLog error type 2021-12-17 01:27:27 +00:00
3c0d059d92 MessageBody::boxed (#2520)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-12-17 00:43:40 +00:00
44b7302845 minimize futures-util dep in actix-http 2021-12-16 22:26:45 +00:00
a6d5776481 various fixes to MessageBody::complete_body (#2519) 2021-12-16 22:25:10 +00:00
156cc20ac8 refactor testing utils (#2518) 2021-12-15 01:44:51 +00:00
dd4a372613 allow error handler middleware to return different body type (#2515) 2021-12-14 21:17:50 +00:00
05255c7f7c remove either crate conversions (#2516) 2021-12-14 19:57:18 +00:00
fb091b2b88 split up router pattern and resource_path modules 2021-12-14 18:59:59 +00:00
11ee8ec3ab align remaining header map terminology (#2510) 2021-12-13 16:08:08 +00:00
551a0d973c doc tweaks 2021-12-13 02:58:19 +00:00
cea44be670 add test for returning App from function 2021-12-11 16:18:28 +00:00
b41b346c00 inline trivial body methods 2021-12-11 16:05:08 +00:00
5b0a50249b prepare actix-multipart release 0.4.0-beta.10 2021-12-11 00:35:26 +00:00
60b030ff53 prepare actix-web-actors release 4.0.0-beta.8 2021-12-11 00:34:23 +00:00
fc4e9ff96b prepare actix-web-codegen release 0.5.0-beta.6 2021-12-11 00:33:31 +00:00
6481a5fb73 prepare actix-test release 0.1.0-beta.8 2021-12-11 00:32:26 +00:00
0cd7c17682 prepare actix-http-test release 3.0.0-beta.9 2021-12-11 00:32:00 +00:00
ed2f5b40b9 prepare actix-files release 0.6.0-beta.10 2021-12-11 00:31:41 +00:00
cc37be9700 prepare actix-web release 4.0.0-beta.14 2021-12-11 00:30:12 +00:00
e1cdabe5cb prepare awc release 3.0.0-beta.13 2021-12-11 00:28:38 +00:00
d0f4c809ca prepare actix-http release 3.0.0-beta.15 2021-12-11 00:22:09 +00:00
65dd5dfa7b bump script updates referenced crate versions 2021-12-11 00:21:30 +00:00
f62383a975 unpin h2 2021-12-10 22:13:12 +00:00
f9348d7129 add ServiceResponse::into_parts (#2499) 2021-12-09 14:57:27 +00:00
774ac7fec4 provide optimisation path for single-chunk body types (#2497) 2021-12-09 13:52:35 +00:00
69fa17f66f clean future h2 dispatcher 2021-12-09 11:27:29 +00:00
816d68dee8 pin h2 temporarily 2021-12-09 00:46:28 +00:00
7dc034f0fb Remove extensions from head (#2487) 2021-12-08 22:58:50 +00:00
07f2fe385b standardize crate level lints 2021-12-08 06:09:56 +00:00
406f694095 standardize rustfmt max_width 2021-12-08 06:01:11 +00:00
e49e559f47 fix some docs 2021-12-08 05:43:50 +00:00
d35b7644dc add connection level data container (#2491) 2021-12-07 17:23:34 +00:00
069cf2da07 enable scope middleware with generic res body. (#2492)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-12-07 16:26:28 +00:00
6460e67f84 remove generic body type in App. (#2493) 2021-12-07 15:53:04 +00:00
9587261c20 add fakeshadow's actix-web in actix-http example 2021-12-07 15:31:15 +00:00
606a371ec3 improve Data docs 2021-12-06 17:14:56 +00:00
bed72d9bb7 fix examples 2021-12-05 23:23:36 +00:00
c596f573a6 bump actix-server to rc.1 2021-12-05 21:25:15 +00:00
627c0dc22f workaround rustdoc bug for Error (#2489) 2021-12-05 16:19:08 +00:00
2d053b7036 remove actix_http::http module (#2488) 2021-12-05 14:37:20 +00:00
59be0c65c6 disallow query or fragements in url_for constructions (#2430)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-12-05 04:39:18 +00:00
e1a2d9c606 Quality / QualityItem improvements (#2486) 2021-12-05 03:38:08 +00:00
d89c706cd6 re-instate Range typed header (#2485)
Co-authored-by: RideWindX <ridewindx@gmail.com>
2021-12-05 00:02:25 +00:00
4c9ca7196d Add WsResponseBuilder to build web socket session response (#1920)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-12-04 22:32:44 +00:00
fa7f3e6908 undeprecate App::data_factory (#2484) 2021-12-04 19:41:15 +00:00
c7c02ef99d body ergonomics v3 (#2468) 2021-12-04 19:40:47 +00:00
a2d5c5a058 Use cilent time out for h2 handshake timeout. (#2483) 2021-12-02 18:16:34 +00:00
deece8d519 re-instate accept-encoding typed header (#2482) 2021-12-02 17:04:40 +00:00
2a72bdae09 improve typed header macro (#2481) 2021-12-02 15:25:39 +00:00
075d871e63 wrap LanguageTags type in new AnyOrSome type to support wildcards (#2480) 2021-12-02 13:59:25 +00:00
c4b20df56a convert all remaining IETF RFC links to new format 2021-12-02 03:45:04 +00:00
0df275c478 update all IETF RFC links to new URL format 2021-12-01 19:42:02 +00:00
697238fadc prepare actix-multipart release 0.4.0-beta.9 2021-12-01 00:26:07 +00:00
e045418038 prepare for actix-tls rc.1 (#2474) 2021-11-30 14:12:04 +00:00
a978b417f3 use actix ready future in remaining return types 2021-11-30 13:11:41 +00:00
fa82b698b7 remove pin-project from actix-web. (#2471) 2021-11-30 11:16:53 +00:00
fc4cdf81eb expose header::map module (#2470) 2021-11-29 02:22:47 +00:00
654dc64a09 don't hang after dropping mutipart (#2463) 2021-11-29 02:00:24 +00:00
cf54388534 re-work from request macro. (#2469) 2021-11-29 01:23:27 +00:00
39243095b5 guarantee ordering of header map get_all (#2467) 2021-11-28 19:23:29 +00:00
89c6d62656 clean up multipart and field stream trait impl (#2462) 2021-11-25 00:10:53 +00:00
52bbbd1d73 Mnior cleanup of multipart API. (#2461) 2021-11-24 20:53:11 +00:00
3e6e9779dc fix big5 charset parsing 2021-11-24 20:16:15 +00:00
9bdd334bb4 add test for duplicate dynamic segent name 2021-11-23 15:57:18 +00:00
bcbbc115aa fix awc changelog 2021-11-23 15:12:55 +00:00
ab5eb7c1aa prepare actix-multipart release 0.4.0-beta.8 2021-11-22 18:48:14 +00:00
18b8ef0765 prepare actix-test release 0.1.0-beta.7 2021-11-22 18:47:43 +00:00
b806b4773c prepare actix-http-test release 3.0.0-beta.7 2021-11-22 18:46:58 +00:00
0062d99b6f prepare actix-files release 0.6.0-beta.9 2021-11-22 18:46:19 +00:00
99e6a9c26d prepare awc release 3.0.0-beta.11 2021-11-22 18:41:43 +00:00
5f5bd2184e prepare actix-web release 4.0.0-beta.12 2021-11-22 18:20:55 +00:00
88e074879d prepare actix-http release 3.0.0-beta.13 2021-11-22 18:19:09 +00:00
e7987e7429 awc: support http2 over plain tcp with feature flag (#2439)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-11-22 18:16:56 +00:00
a172f5968d prepare for actix-tls v3 beta 9 (#2456) 2021-11-22 15:37:23 +00:00
a2a42ec152 use anybody in doc test 2021-11-22 01:35:33 +00:00
dd347e0bd0 implement io-uring for actix-files (#2408)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-11-22 01:19:09 +00:00
194a691537 files: 304 Not Modified responses omit Content-Length header (#2453) 2021-11-19 14:04:12 +00:00
56ee97f722 add files path traversal tests 2021-11-18 18:14:34 +00:00
66620a1012 simplify handler.rs (#2450) 2021-11-17 20:11:35 +00:00
e33618ed6d ensure content disposition header in multipart (#2451)
Co-authored-by: Craig Pastro <craig.pastro@gmail.com>
2021-11-17 17:44:50 +00:00
1fe309bcc6 increase ci test timeout 2021-11-17 15:32:42 +00:00
168a7284d3 fix actix_http::Error conversion. (#2449) 2021-11-17 13:13:05 +00:00
68a3acb9c2 bump zstd dep 2021-11-16 23:22:29 +00:00
84c6d25fd3 bump env logger dep 2021-11-16 23:07:08 +00:00
0a135c7dc9 bump actix-codec to 0.4.1 2021-11-16 22:41:24 +00:00
668a33c793 remove internal usage of Body 2021-11-16 22:10:30 +00:00
d8cbb879dd make AnyBody generic on Body type (#2448) 2021-11-16 21:41:35 +00:00
13cf5a9e44 remove chunked encoding header for websockets 2021-11-16 16:55:45 +00:00
4df1cd78b7 simplify AnyBody and BodySize (#2446) 2021-11-16 09:21:10 +00:00
e8a0e16863 run tarpaulin on workspace 2021-11-15 18:11:51 +00:00
a2f59c02f7 bump actix-server to beta 9 (#2442) 2021-11-15 04:03:33 +00:00
2754608f3c fix codegen tests 2021-11-08 02:46:43 +00:00
c020cedb63 Log internal server errors (#2387) 2021-11-07 17:02:23 +00:00
5e554dca35 fix awc clippy warning (#2431) 2021-11-04 15:57:55 +00:00
6ec2d7b909 add keep alive to h2 through ping pong (#2433) 2021-11-04 15:15:23 +00:00
ec6d284a8e improve "data no configured" message (#2429) 2021-10-31 13:19:21 +00:00
be9530eb72 avoid building actix-tls with no-default-features (#2426) 2021-10-26 13:16:48 +01:00
855e260fdb Add html_utf8 content type. (#2423) 2021-10-26 09:24:38 +01:00
d13854505f move actix_http::client module to awc (#2425) 2021-10-26 00:37:40 +01:00
d40b6748bc remove dead dep (#2420) 2021-10-22 00:22:58 +01:00
c79b9a0df3 prepare actix-files release 0.6.0-beta.8 2021-10-20 23:32:46 +01:00
4af414064b prepare actix-multipart release 0.4.0-beta.7 2021-10-20 23:31:46 +01:00
9abe166d52 actix-web beta 10 releases (#2417) 2021-10-20 22:32:05 +01:00
c09ec6af4c split off coverage ci job 2021-10-20 02:27:30 +01:00
37f2bf5625 clippy 2021-10-20 02:06:51 +01:00
4f6f0b0137 chore: Bump rustls to 0.20.0 (#2416)
Co-authored-by: Kirill Mironov <vetrokm@gmail.com>
2021-10-20 02:00:11 +01:00
591abc37c3 add test runtime macro (#2409) 2021-10-19 17:30:32 +01:00
ad22cc4e7f bump msrv to 1.52.1 2021-10-19 01:59:28 +01:00
efdf3ab1c3 clippy 2021-10-19 01:32:58 +01:00
6b3ea4fc61 copy original route macro input with compile errors (#2410) 2021-10-14 18:06:31 +01:00
99985fc4ec web: implement into_inner for Data<T: ?Sized> (#2407) 2021-10-12 18:35:33 +01:00
a6707fb7ee Remove checked_expr (#2401) 2021-10-11 18:28:09 +01:00
a3806cde19 fix changelog 2021-09-12 22:41:08 +01:00
efefa0d0ce web: add option to not require content type header for Json (#2362)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-09-11 17:27:50 +01:00
450ff5fa1d improve extract docs (#2384) 2021-09-11 16:48:47 +01:00
8ae278cb68 Remove FromRequest::Config (#2233)
Co-authored-by: Jonas Platte <jplatte@users.noreply.github.com>
Co-authored-by: Igor Aleksanov <popzxc@yandex.ru>
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-09-11 01:11:16 +01:00
46699e3429 remove time dep from actix-http (#2383) 2021-09-11 00:01:01 +01:00
ba88d3b4bf prepare actix-web beta.9 releases (#2381)
* prepare actix-router release 0.5.0-beta.2

* prepare actix-web-codegen release 0.5.0-beta.4

* prepare actix-http release 3.0.0-beta.10

* prepare awc release 3.0.0-beta.8

* prepare actix-web release 4.0.0-beta.9

* prepare actix-http-test release 3.0.0-beta.6

* prepare actix-test release 0.1.0-beta.4

* prepare actix-files release 0.6.0-beta.7

* prepare actix-multipart release 0.4.0-beta.6

* prepare actix-web-actors release 4.0.0-beta.7

* fix http test version

* re-add patch

* update router repo url

* fix http test readme version
2021-09-09 01:35:41 +01:00
8dd30611fa accept owned strings in TestRequest::param (#2172)
* accept owned strings in TestRequest::param

* bump actix-router to 0.4.0

* update changelog

Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-09-09 00:42:40 +01:00
1383c7d701 speed up ci 2021-09-08 17:42:14 +01:00
d8a0f46f26 refactor web module (#2379) 2021-09-03 18:00:43 +01:00
53ec66caf4 Send headers within the redirect requests. (#2310) 2021-09-01 20:16:41 +01:00
93112644d3 non exhaustive content encoding (#2377) 2021-09-01 09:53:26 +01:00
ddc8c16cb3 Fix quality parse error in Accept-Encoding HTTP header (#2344) 2021-09-01 09:08:29 +01:00
373b3f91df rework ResourceMap internals (#2337) 2021-09-01 04:48:43 +01:00
7d01ece355 ResourceDef: support multiple-patterns as prefix (#2356)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-08-31 14:15:22 +01:00
c50eef6166 "deprecate" calls to NormalizePath::default 2021-08-31 04:19:02 +01:00
dade818eba add middleware composition tests (#2375) 2021-08-31 04:18:54 +01:00
ae35e69382 use rust 1.51 features 2021-08-31 02:52:29 +01:00
5128b1bdfc bump msrv to 1.51 2021-08-30 23:19:03 +01:00
168b2f227d compile time validation of path (#2350)
* compile time validation of path

* added trybuild err message

* Update Cargo.toml

* add changelog entry

* test more cases of path validation

* fmt

Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-08-30 21:50:40 +01:00
4bb32fb19b [fix] Bump actix-http dependency to 3.0.0-beta.9, up from 3.0.0-beta.8 (#2360)
Fixes https://rustsec.org/advisories/RUSTSEC-2021-0081
2021-08-30 20:07:12 +01:00
f9da6e48e0 ResourceDef: define behavior for prefix with trailing slash (#2355)
* ResourceDef: define behavior

* fix tests

* add scope test

* revert firestorm bump

* update changelog

* fmt

Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-08-30 20:05:49 +01:00
ff07816b65 update httparse for uninit header parsing (#2374) 2021-08-29 01:42:22 +01:00
5f412c67db clippy 2021-08-13 18:49:58 +01:00
a0c0bff944 Don't create a slice to potential uninit data on h1 encoder (#2364)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-08-13 18:41:19 +01:00
384164cc14 update graphs 2021-08-12 21:04:26 +01:00
e965d8298f HRS security fixes (#2363) 2021-08-12 20:18:09 +01:00
f6e69919ed update to router 0.5.0 beta (#2339) 2021-08-06 22:42:31 +01:00
293c52c3ef re-export ServiceFactory (#2325) 2021-07-12 16:55:41 +01:00
5a14ffeef2 clippy fixes (#2296) 2021-07-12 16:55:24 +01:00
7ae132cb68 Update MIGRATION.md (#2315)
Minor edit
2021-07-12 02:02:19 +01:00
d8deed0475 fix tests with tokio 1.8.1 (#2317) 2021-07-09 23:57:21 +01:00
2504c2ecb0 Move dev module to separate file, update description (#2293) 2021-06-27 07:44:56 +01:00
604be5495f prepare beta.8 releases (#2292) 2021-06-26 16:33:36 +01:00
262c6bc828 Various refactorings (#2281)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-06-26 15:33:43 +01:00
5eba95b731 simplify ConnectionInfo::new (#2282) 2021-06-26 00:39:06 +01:00
09afd033fc files: file path filtering closure (#2274)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-06-25 14:21:57 +01:00
539697292a fix scope and resource middleware data access (#2288) 2021-06-25 13:19:42 +01:00
5a480d1d78 re-add serde error impls 2021-06-25 12:28:04 +01:00
9a26393375 Remove duplicated step from CI workflow (#2289) 2021-06-25 12:27:22 +01:00
2eacb735a4 Don't leak internal macros (#2290) 2021-06-25 12:25:50 +01:00
767e4efe22 Remove downcast macro from actix-http (#2291) 2021-06-25 10:53:53 +01:00
e559a197cc remove comment 2021-06-24 15:30:11 +01:00
93aa86e30b clippy 2021-06-24 15:11:01 +01:00
2d8d2f5ab0 app data doc improvements 2021-06-24 15:10:51 +01:00
083ee05d50 Route::service (#2262)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-06-23 21:30:06 +01:00
ed0516d724 try to fix doc test failures (#2284) 2021-06-23 20:47:17 +01:00
7535a1ade8 Note that Form cannot require data ordering (#2283) 2021-06-23 16:54:25 +01:00
8846808804 ServiceRequest::parts_mut (#2177) 2021-06-23 00:42:00 +01:00
3b6333e65f Propagate error cause to middlewares (#2280) 2021-06-22 22:22:33 +01:00
b1148fd735 Implement FromRequest for request parts (#2263)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-06-22 17:32:03 +01:00
12f7720309 deprecate App::data and App::data_factory (#2271) 2021-06-22 15:50:58 +01:00
2d8530feb3 chore: bump actix to 0.12.0 in actix-web-actors (#2277)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-06-22 14:00:28 +01:00
7faeffc5ab prepare actix-test release 0.1.0-beta.3 2021-06-20 19:47:42 +01:00
f81d4bdae7 remove unused private hidden methods 2021-06-19 23:40:30 +01:00
6893773280 files: allow show_files_listing() with index_file() (#2228) 2021-06-19 21:00:31 +01:00
73a655544e tweak compress feature docs 2021-06-19 20:23:06 +01:00
baa5a663c4 Select compression algorithm using features flags (#2250)
Add compress-* feature flags in actix-http / actix-web / awc.
This allow enable / disable not wanted compression algorithm.
2021-06-19 20:21:13 +01:00
c260fb1c48 beta.7 releases (#2266) 2021-06-19 11:51:20 +01:00
532f7b9923 refined error model (#2253) 2021-06-17 17:57:58 +01:00
bb0331ae28 fix cargo cache on msrv 2021-06-17 16:49:31 +01:00
8d124713fc files: inline disposition for common web app file types (#2257) 2021-06-16 20:33:22 +01:00
fb2b362b60 Adjust JSON limit to 2MB and report on sizes (#2162)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-06-16 15:52:49 +01:00
75f65fea4f Extends Rustls ALPN protocols instead of replacing them when creating Rustls based services (#2226) 2021-06-10 16:25:21 +01:00
812269d656 clarify docs for BodyEncoding::encoding() (#2258) 2021-06-10 15:38:35 +01:00
e46cda5228 Deduplicate rt::main macro logic (#2255) 2021-06-08 22:44:56 +01:00
2e1d761854 add Seal argument to sealed AsHeaderName methods (#2252) 2021-06-08 12:57:19 +01:00
b1e841f168 Don't normalize URIs with no valid path (#2246) 2021-06-05 17:19:45 +01:00
0bb035cfa7 Add information about Actix discord server (#2247) 2021-06-04 02:54:40 +01:00
3479293416 Add zstd ContentEncoding support (#2244)
Co-authored-by: Igor Aleksanov <popzxc@yandex.ru>
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-06-03 21:32:52 +01:00
136dac1352 Additional test coverage and tidyup (middleware::normalize) (#2243) 2021-06-03 03:28:09 +01:00
e5b713b04a files: Fix redirect_to_slash_directory() when used with show_files_listing() (#2225) 2021-05-26 10:42:29 +01:00
3847429d00 Response::from_error take impl Into<Error> (#2214) 2021-05-26 13:41:48 +09:00
bb7d33c9d4 refactor h2 dispatcher to async/await.reduce duplicate code (#2211) 2021-05-25 03:21:20 +01:00
4598a7c0cc Only run UI tests on MSRV (#2232) 2021-05-25 00:09:38 +09:00
b1de196509 Fix clippy warnings (#2217) 2021-05-15 01:13:33 +01:00
2a8c650f2c move internalerror to actix web (#2215) 2021-05-14 16:40:00 +01:00
f277b128b6 cleanup ws test (#2213) 2021-05-13 12:24:32 +01:00
4903950b22 update changelog 2021-05-09 20:15:49 +01:00
f55e8d7a11 remove error field from response 2021-05-09 20:15:48 +01:00
900c9e270e remove responsebody indirection from response (#2201) 2021-05-09 20:12:48 +01:00
a9dc1586a0 remove rogue eprintln 2021-05-07 10:14:25 +01:00
947caa3599 examples use info log level by default 2021-05-06 20:24:18 +01:00
7d1d5c8acd Expose SererBuilder::worker_max_blocking_threads (#2200) 2021-05-06 18:35:04 +01:00
ddaf8c3e43 add associated error type to MessageBody (#2183) 2021-05-05 18:36:02 +01:00
dd1a3e7675 Fix loophole in soundness of __private_get_type_id__ (#2199) 2021-05-05 11:16:12 +01:00
c17662fe39 Reduce the level of the emitted log line from error to debug. (#2196)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-05-03 00:58:14 +01:00
3a0fb3f89e Static either extract future (#2184) 2021-05-01 03:02:56 +01:00
1fcf92e11f Update dependency "language-tags" (#2188) 2021-04-28 01:23:12 +01:00
6a29a50f25 files doc wording 2021-04-22 18:37:45 +01:00
75867bd073 clean up files service docs and rename method
follow on from #2046
2021-04-22 18:31:21 +01:00
f44a0bc159 add support of filtering guards in Files of actix-files (#2046)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-22 18:13:13 +01:00
07036b5640 static form extract future (#2181) 2021-04-22 13:54:29 +01:00
a7cd4e85cf use stable codec 0.4.0 2021-04-21 11:14:22 +01:00
6a9c4f1026 update awc docs link, formatting (#2180) 2021-04-20 19:57:27 +01:00
427fe6bd82 improve responseerror trait docs 2021-04-19 23:16:04 +01:00
2aa674c1fd Fix perf drop in HttpResponseBuilder (#2174) 2021-04-19 23:15:57 +01:00
52bb2b5daf hide downcast macros 2021-04-19 03:42:53 +01:00
db97974dc1 make some http re-exports more accessible (#2171) 2021-04-19 03:29:38 +01:00
b9dbc58e20 content disposition methods take impl AsRef<str> 2021-04-19 02:31:11 +01:00
35f8188410 restore cookie methods on ServiceRequest 2021-04-19 02:24:20 +01:00
8ffb1f2011 update files changelog 2021-04-19 02:11:07 +01:00
26e9c80626 Named file service (#2135) 2021-04-18 23:34:51 +01:00
f462aaa7b6 prepare actix-test release 0.1.0-beta.2 2021-04-17 15:53:54 +01:00
5a162932f3 prepare awc release 3.0.0-beta.5 2021-04-17 15:30:31 +01:00
b2d6b6a70c prepare web release 4.0.0-beta.6 2021-04-17 15:28:13 +01:00
f743e885a3 prepare http release 3.0.0-beta.6 2021-04-17 15:24:18 +01:00
5747f84736 bump utils to stable v3 2021-04-17 02:07:33 +01:00
879a4cbcd8 re-export ready boilerplate macros in dev 2021-04-16 23:21:02 +01:00
2449f2555c missed one pipeline_factory 2021-04-16 20:48:37 +01:00
d8f56eee3e bump service to stable v2 2021-04-16 20:28:21 +01:00
8d88a0a9af rename header generator macros 2021-04-16 19:15:10 +01:00
845c02cb86 Add responder impl for Cow<str> (#2164) 2021-04-16 00:54:51 +01:00
64bed506c2 chore: update benchmaks to round 20 (#2163) 2021-04-15 19:11:30 +01:00
ff65f1d006 non exhaustive http errors (#2161) 2021-04-14 06:07:59 +01:00
a9f26286f9 reduce branches in h1 dispatcher poll_keepalive (#2089) 2021-04-14 05:20:45 +01:00
037ac80a32 document messagebody trait items 2021-04-14 03:23:15 +01:00
1bfdfd1f41 implement parts as assoc method 2021-04-14 02:57:28 +01:00
5202bf03c1 add some doc examples to response builder 2021-04-14 02:45:58 +01:00
387c229f28 move response builder code to own file 2021-04-14 02:12:47 +01:00
23e0c9b6e0 remove http-codes builders from actix-http (#2159) 2021-04-14 02:00:14 +01:00
02ced426fd add body to_bytes helper (#2158) 2021-04-13 13:34:22 +01:00
4442535a45 clippy 2021-04-13 12:44:38 +01:00
edd9f14752 remove unpin from body types (#2152) 2021-04-13 11:16:12 +01:00
ce50cc9523 files: Don't use canonical path when serving file (#2156) 2021-04-13 05:28:30 +01:00
981c54432c remove json and url encoded form support from -http (#2148) 2021-04-12 10:30:28 +01:00
44c55dd036 remove cookie support from -http (#2065) 2021-04-09 18:07:10 +01:00
c72d77065d derive debug where possible (#2142) 2021-04-09 03:22:51 +01:00
44a2d2214c update year in MIT license (#2143) 2021-04-09 01:28:35 +01:00
3f5a73793a make module/crate re-exports doc inline (#2141) 2021-04-08 20:51:16 +01:00
e0b2246c68 prepare test release 0.1.0-beta.1 2021-04-02 10:03:01 +01:00
e0ae8e59bf prepare actors release 4.0.0-beta.4 2021-04-02 09:55:35 +01:00
a9641e475a prepare http-test release 3.0.0-beta.4 2021-04-02 09:54:35 +01:00
05c7505563 prepare multipart release 0.4.0-beta.4 2021-04-02 09:45:31 +01:00
8561263545 prepare files release 0.6.0-beta.4 2021-04-02 09:43:51 +01:00
a32151525c prepare awc release 3.0.0-beta.4 2021-04-02 09:40:36 +01:00
546e7c5da4 prepare web release 4.0.0-beta.5 2021-04-02 09:37:51 +01:00
6fb06a720a prepare http release 3.0.0-beta.5 2021-04-02 09:27:11 +01:00
c54a0713de migrate integration testing to new crate (#2112) 2021-04-02 08:26:59 +01:00
50dc13f280 move typed headers and implement FromRequest (#2094)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-01 16:42:18 +01:00
c8ed8dd1a4 migrate to -utils beta 4 (#2127) 2021-04-01 15:26:13 +01:00
a807d33600 added TestServer::client_headers (#2097)
Co-authored-by: fakeshadow <24548779@qq.com>
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-04-01 06:40:10 +01:00
1f1be6fd3d add Client::headers (#2114) 2021-03-31 11:43:56 +01:00
c49fe79207 Simplify lifetime annotation in HttpServiceBuilder. Simplify PlStream (#2129) 2021-03-30 15:46:09 +01:00
f66774e30b remove From<OffsetDateTime> impl from HttpDate
fully removes time crate from public api of -http
2021-03-30 03:32:22 +01:00
1281a748d0 merge H1ServiceHandler requests into HttpServiceHandler (#2126) 2021-03-30 03:06:16 +01:00
222acfd070 Fix build for next actix-tls-beta release (#2122) 2021-03-29 13:45:48 +01:00
980ecc5f07 fix openssl windows ci 2021-03-29 13:01:37 +01:00
e8ce73b496 update dep docs 2021-03-29 11:52:59 +01:00
f954a30c34 Fix typo in CHANGES.md (#2124) 2021-03-29 10:18:05 +01:00
60f9cfbb2a Refactor actix_http::h2::service module. Reduce loc. (#2118) 2021-03-26 18:24:51 +00:00
6822bf2f58 Refactor actix_http::h1::service (#2117) 2021-03-26 16:15:04 +00:00
2f7f1fa97a fix broken pipe for h2 when client is instantly dropped (#2113) 2021-03-26 00:05:31 +00:00
8c2ce2dedb fix awc compress feature (#2116) 2021-03-25 22:47:37 +00:00
3188ef5731 don't use rust annotation on code doc blocks 2021-03-25 08:45:52 +00:00
9704beddf8 Relax MessageBody limit to 2048kb (#2110)
* relax MessageBody limit to 2048kb

* fix clippy

* Update awc/src/response.rs

Co-authored-by: Rob Ede <robjtede@icloud.com>

* fix default body limit

Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-03-24 04:44:03 -07:00
1be54efbeb Simplify service factory macro (#2108) 2021-03-23 13:42:46 +00:00
746d983849 handle header error with CustomResponder (#2093) 2021-03-20 05:18:06 +00:00
8d9de76826 Simplify handler factory macro (#2086) 2021-03-19 16:30:53 +00:00
9488757c29 Update to socket2 v0.4 (#2092) 2021-03-19 12:17:06 +00:00
351286486c fix clippy warning on nightly (#2088)
* fix clippy warning on nightly
2021-03-19 19:25:35 +08:00
78fcd0237a Format extract macro (#2087) 2021-03-19 04:08:23 +00:00
81942d31d6 fix new dyn trait lint 2021-03-19 02:03:09 +00:00
b75b5114c3 refactor actix_http connection types and connector services (#2081) 2021-03-18 17:53:22 +00:00
abcb444dd9 fix routes in Path documentation (#2084) 2021-03-18 13:21:44 +00:00
983b6904a7 unvendor openssl 2021-03-17 00:38:54 +00:00
3dc2d145ef import some traits as _ 2021-03-17 00:38:54 +00:00
c8f6d37290 rename client io trait. reduce duplicate code (#2079) 2021-03-16 16:31:14 +00:00
69dd1a9bd6 Remove ConnectionLifetime trait. Simplify Acquired handling (#2072) 2021-03-16 02:56:23 +00:00
d93314a683 fix awc readme example (#2076) 2021-03-15 10:59:42 +00:00
a55e87faaa refactor actix_http::helpers to generic over bufmut trait (#2069) 2021-03-15 02:33:51 +00:00
515d0e3fb4 change behavior of default upgrade handler (#2071) 2021-03-13 22:20:18 +00:00
22dcc31193 Fix logger middleware properly escape %% (#2067) 2021-03-11 14:12:42 +00:00
909ef0344b document client mod removal
closes #2064
2021-03-11 00:43:03 +00:00
a2b0e86632 simplify connector generic type (#2063) 2021-03-10 23:57:32 +00:00
d0c1f1a84c remove actix_http::client::pool::Protocol (#2061) 2021-03-10 01:31:50 +00:00
b62da7e86b prepare actix-web-actors release 4.0.0-beta.3 2021-03-09 23:44:26 +00:00
5e9a3eb6ae prepare actix-multipart release 0.4.0-beta.3 2021-03-09 23:40:50 +00:00
3451d6874f prepare actix-files release 0.6.0-beta.3 2021-03-09 23:39:40 +00:00
18c3783a1c prepare actix-http-test release 3.0.0-beta.3 2021-03-09 23:35:42 +00:00
4b46351d36 prepare actix-web release 4.0.0-beta.4 2021-03-09 23:31:44 +00:00
b7c406637d prepare actix-web-codegen release 0.5.0-beta.2 2021-03-09 23:27:38 +00:00
c4e5651215 update docs 2021-03-08 23:49:12 +00:00
23b0e64199 prepare awc release 3.0.0-beta.3 2021-03-08 23:15:53 +00:00
fc31b091e4 prepare http release 3.0.0-beta.4 2021-03-08 23:07:40 +00:00
effacf8fc8 fix ssl test 2021-03-08 20:51:50 +00:00
95130fcfd0 address clippy warnings 2021-03-08 20:32:19 +00:00
5e81105317 remove ka timer from h2 dispatcher (#2057) 2021-03-08 20:00:20 +00:00
5b4105e1e6 Refactor/client builder (#2053) 2021-03-07 23:57:32 +00:00
2d3a0d6038 json method receives plain serialize (#2052) 2021-03-07 22:11:39 +00:00
fe0b3f459f remove localwaker from h1::payload (#2051) 2021-03-07 21:23:42 +00:00
ca69b6577e use iota for more content-length insertions (#2050) 2021-03-07 19:29:02 +00:00
880b863f95 fix h1 client for handling expect header request (#2049) 2021-03-07 18:33:16 +00:00
78384c3ff5 make actix_http::ws::Codec::new const (#2043) 2021-03-04 19:19:01 +00:00
c1c4400c4a fix h2 tests (#2034) 2021-03-04 13:27:54 +00:00
fc6f974617 Add "name" attribute to route macro (#1934) 2021-03-04 12:38:47 +00:00
14b249b804 update to actix-0.11.0-beta.3 for actix-web-actors (#2042) 2021-03-04 11:39:29 +00:00
0195824794 Update codecov.yml 2021-03-02 13:18:16 +00:00
fb019f15b4 test(files): Fix test and remove outdated case (#2037)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-02-28 23:01:59 +00:00
abc7fd374b update example links (#2036)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-02-28 21:41:07 +00:00
cd652dca75 refactor websocket key hashing (#2035) 2021-02-28 19:55:34 +00:00
c836de44af add client middleware (#2013) 2021-02-28 18:17:08 +00:00
badae2f8fd add local_address bind for client builder (#2024) 2021-02-27 22:31:14 +00:00
1f34718ecd Use once_cell instead of lazy_static (#2029) 2021-02-27 21:55:50 +00:00
ebda60fd6b refactor boxed route (#2033) 2021-02-27 21:00:36 +00:00
d242f57758 fix tests for codecov 2021-02-27 20:58:44 +00:00
b95e1dda34 pin h2 to 0.3.0 2021-02-27 19:57:09 +00:00
8f2a97c6e3 Update README example links (#2027)
The examples repo went through a folder restructuring. More info: https://github.com/actix/examples/pull/411
2021-02-26 01:21:56 +00:00
ebaf25d55a Fix typos in CHANGES.md (#2025)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-02-24 16:19:40 +00:00
42711c23d7 Port over doc comments in route macros. (#2022)
Co-authored-by: Jonas Platte <jplatte@users.noreply.github.com>
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-02-24 12:26:56 +00:00
f6393728c7 remove usage of actix_utils::mpsc (#2023) 2021-02-24 09:08:56 +00:00
d92ab7e8e0 add msrv to clippy config (#1862) 2021-02-22 15:39:31 +00:00
5845b3965c actix-http-test: minimize features of dependencies (#2019) 2021-02-22 12:00:08 +00:00
aacec30ad1 reduce duplicate code (#2020) 2021-02-22 11:15:12 +00:00
2dbdf61c37 Inner field of web::Query is public again (#2016) (#2017)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-02-20 17:59:09 +00:00
83365058ce Fix HTTP client link (#2011) 2021-02-18 21:56:24 +00:00
3b93c62e23 Fix Json extractor to be 32kB by default (#2010) 2021-02-18 15:20:20 +00:00
946cccaa1a refactor awc::ClientBuilder (#2008) 2021-02-18 12:30:09 +00:00
1838d9cd0f remove unused method. reduce leaf future type (#2009) 2021-02-18 11:24:10 +00:00
f62a982a51 simplify the match on h1 message type (#2006) 2021-02-18 10:38:27 +00:00
dfd9dc40ea remove awc::connect::connect trait. (#2004) 2021-02-17 17:10:46 +00:00
5efea652e3 add ClientResponse::timeout (#1931) 2021-02-17 11:55:11 +00:00
dfa795ff9d return poll in poll_flush (#2005) 2021-02-17 11:18:31 +00:00
2cc6b47fcf Use http-range library for HttpRange (#2003) 2021-02-16 18:48:16 +00:00
117025a96b simplify client::connection::Connection trait (#1998) 2021-02-16 14:10:22 +00:00
3e0a9b99ff update rust-cache action 2021-02-16 09:28:14 +00:00
17b3e7e225 pool doc nits (#1999) 2021-02-16 09:08:30 +00:00
c065729468 rework client connection pool (#1994) 2021-02-16 08:27:14 +00:00
55db3ec65c split up http body module 2021-02-15 12:20:43 +00:00
0404b78b54 improve body size docs 2021-02-15 11:24:46 +00:00
68d1bd88b1 remove unused flag upgrade (#1992) 2021-02-14 18:13:05 +00:00
308b70b039 fix potential over read (#1991) 2021-02-14 17:36:18 +00:00
7fa6333a0c use rcgen for tls key generation (#1989) 2021-02-13 17:16:36 +00:00
3279070f9f optional cookies features (#1981) 2021-02-13 15:08:43 +00:00
b37669cb3b fix notify on drop (#1987) 2021-02-13 04:23:37 +00:00
1e538bf73e rework ci (#1982) 2021-02-12 21:53:21 +00:00
366c032c36 refactor DateService (#1983) 2021-02-12 21:52:58 +00:00
95113ad12f do not self wake up when have a payload (#1984) 2021-02-12 20:33:13 +00:00
ce9b2770e2 remove unused Dispatcher::new_timeout (#1985) 2021-02-12 10:37:28 +00:00
4fc7d76759 s/websocket/WebSocket in docs 2021-02-12 00:27:20 +00:00
81bef93e5e add time parser year shift tests 2021-02-12 00:15:25 +00:00
31d9ed81c5 change rustfmt line width to 96 2021-02-11 23:03:17 +00:00
c1af5089b9 add 431 and 451 status codes 2021-02-11 22:58:40 +00:00
77efc09362 hide httpmessage mod 2021-02-11 22:58:40 +00:00
871ca5e4ae stop claiming actor support 2021-02-11 22:58:40 +00:00
ceace26ed4 remove unused flag POLLED (#1980) 2021-02-11 14:19:14 -08:00
75a9a72e78 clean up poll_response. add comments (#1978) 2021-02-11 14:54:42 +00:00
d9d0d1d1a2 reduce unsafe (#1972) 2021-02-10 23:11:12 +00:00
ea5ce3befb prepare actix-http 3.0.0-beta.3 release 2021-02-10 18:36:14 +00:00
e18464b274 bump actix web versions in deps 2021-02-10 12:57:13 +00:00
bd26083f33 prepare codegen 0.5.0-beta.1 release 2021-02-10 12:45:46 +00:00
991363a104 consistent case s/web/Web 2021-02-10 12:12:03 +00:00
a290e58982 prepare beta 2 release set (#1975) 2021-02-10 12:10:03 +00:00
dcad9724bc ensure poll_flush on h1 connection disconnect (#1974) 2021-02-10 10:11:53 +00:00
949d14ae2b clean up header map (#1964) 2021-02-09 22:59:17 +00:00
a6ed4aee84 add poll_flush after a non blocked write to h1 dispatcher (#1971) 2021-02-09 22:32:46 +00:00
519d7f2b8a add trust-dns optional feature for actix-http and awc (#1969) 2021-02-09 10:41:20 +00:00
dddb623a11 add services register for tuple and vec of services (#1933) 2021-02-07 23:47:51 +00:00
266cf0622c reduce branch.remove deadcode for h1 dispatcher (#1962) 2021-02-07 22:48:27 +00:00
9604e249c9 use stable clippy (#1963) 2021-02-07 20:33:53 +00:00
dbc47c9122 optimize actix-http messages (#1914) 2021-02-07 20:19:10 +00:00
4c243cbf89 simplify methods of awc::connect::Connect trait (#1941) 2021-02-07 18:56:39 +00:00
deafb7c8b8 Improve impl ResponseError documentation (#1939)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-02-07 04:54:41 +00:00
50309aa295 Use askama-escape for html escaping (#1953) 2021-02-07 04:50:23 +00:00
9eaea6a2fd tweak feature flags 2021-02-07 03:54:58 +00:00
830fb2cdb2 properly drop h2 connection (#1926) 2021-02-07 03:51:36 +00:00
7cfed73be8 fix memory usage for h1 and read bug on buffer size. (#1929) 2021-02-07 03:20:35 +00:00
41bc04b1c4 Use immutable reference of service state. Update awc dns resolver. (#1905) 2021-02-07 01:00:40 +00:00
20cf0094e5 fix master branch build. change web::block output type. (#1957) 2021-02-06 16:23:59 +00:00
83fb4978ad fix awc test_client test (#1960) 2021-02-06 16:05:33 +00:00
51e54dac8b fix limit not working on HttpMessageBody::limit (#1938) 2021-01-27 10:49:57 +00:00
c201c15f8c Improve documentation for PayloadConfig (#1923) 2021-01-24 00:32:10 +00:00
0c8196f8b0 Remove HttpResponseBuilder::json2() (#1903)
It's not necessary to keep both json() and json2() around since the
former reduces the ownership of its parameter to a borrow only to pass
the reference to the latter. Users can instead borrow themselves when
passing an owned value: there doesn't need to be two separate functions.

This change also makes HttpResponseBuilder::json() take T: Deref so it
can accept both references and web extractors like web::Json.
2021-01-18 12:14:29 +00:00
ee10148444 revive commented out tests (#1912) 2021-01-17 05:19:32 +00:00
1c95fc2654 Refactor poll_keepalive for readability (#1901) 2021-01-16 00:15:06 +00:00
da69bb4d12 implement App::data as App::app_data(Data::new(T))) (#1906) 2021-01-15 23:37:33 +00:00
0a506bf2e9 cleanup top level doc comments 2021-01-15 05:38:50 +00:00
b2a9ba2ee4 Update PULL_REQUEST_TEMPLATE.md 2021-01-15 04:54:23 +00:00
f976150b67 return option item from Extensions::insert (#1904) 2021-01-15 04:22:42 +00:00
b1dd8d28bc response header rework (#1869) 2021-01-15 02:11:10 +00:00
4edeb5ce47 optimize ErrorHandler middleware (#1902) 2021-01-14 01:43:44 +00:00
d34a8689e5 Refactor h1 encoder (#1900) 2021-01-12 14:38:53 +00:00
a919d2de56 actix-files: Fix If-(Un)Modified to not consider sub-seconds (#1887) 2021-01-11 18:18:23 +00:00
46a8f28b74 fix actix-files doc about thread pool (#1898) 2021-01-11 17:27:33 +00:00
57398c6df1 Refactor/service request (#1893) 2021-01-11 01:29:16 +00:00
7affc6878e simplify h1 dispatcher (#1899)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-01-11 00:13:56 +00:00
46b2f7eaaf use a non leak pool for HttpRequestInner (#1889)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-01-10 22:59:44 +00:00
9e401b6ef7 refactor Scope (#1895) 2021-01-09 18:06:49 +00:00
fe392abeb4 remove actix-threadpool.use actix_rt::task::spawn_blocking (#1878) 2021-01-09 16:04:19 +00:00
f6cc829758 remove leaked box in REQUEST_POOL and RESPONSE_POOL (#1896) 2021-01-09 15:40:20 +00:00
6575ee93f2 big clean up and docs improvmenet of types mod (#1894) 2021-01-09 13:17:19 +00:00
530d03791d refactor Resource (#1883) 2021-01-09 03:36:58 +00:00
d40ae8c8ca use sync method on Responder trait (#1891) 2021-01-08 22:17:19 +00:00
2204614134 don't run awc doctests that rely on external public endpoints (#1888) 2021-01-08 12:00:58 +00:00
188ee44f81 remove copyless dependency (#1884) 2021-01-07 21:55:00 +00:00
a4c9aaf337 fix extra branch in h1 dispatcher timer (#1882) 2021-01-07 20:42:09 +00:00
c09186a2c0 prepare v4 beta releases (#1881) 2021-01-07 20:02:08 +00:00
d3c476b8c2 use env_logger builders in examples 2021-01-07 02:41:05 +00:00
dc23559f23 address clippy lints 2021-01-07 02:04:26 +00:00
6d710629af fix bug where upgrade future is not reset properly (#1880) 2021-01-07 00:57:34 +00:00
85753130d9 fmt 2021-01-07 00:35:19 +00:00
00ba8d5549 add http3 variant to protocol enum 2021-01-06 18:58:24 +00:00
51e9e1500b add docs to recent additions 2021-01-06 18:52:06 +00:00
a03dbe2dcf replace cloneable service with httpflow abstraction (#1876) 2021-01-06 18:43:52 +00:00
57a3722146 More refactor of app_service (#1879) 2021-01-06 18:11:20 +00:00
57da1d3c0f refactor app_service (#1877) 2021-01-06 11:35:30 +00:00
68117543ea major cleanup of middleware module (#1875)
* major cleanup of middleware module

* update changelog
2021-01-05 09:51:58 +00:00
4f5971d79e add Compat middleware (#1865) 2021-01-05 00:22:57 +00:00
93161df141 clean up body type (#1872) 2021-01-04 23:47:38 +00:00
e567873326 optimize message pool release (#1871) 2021-01-04 13:03:46 +00:00
7d632d0b7b use ByteString as container for websocket text message (#1864) 2021-01-04 11:27:32 +00:00
36aee18c64 fmt 2021-01-04 04:33:15 +00:00
007a145988 use ahash for internal hashmaps 2021-01-04 04:29:07 +00:00
2d4a174420 fmt 2021-01-04 01:01:35 +00:00
21f6c9d7a5 improve code readability 2021-01-04 00:49:02 +00:00
e1683313ec optimize ServiceRequest methods (#1870)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-01-04 00:32:41 +00:00
32de9f8840 Tokio 1.0 (#1813)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-01-03 23:47:04 +00:00
1f202d40e4 optimize write_camel_case in h1 encoder (#1868) 2021-01-03 16:53:01 +00:00
ad608aa64e optimize Resource and Scope service call (#1867) 2021-01-02 19:40:31 +00:00
a1b00b2cd0 change unreleased year 2021-01-02 00:12:18 +00:00
3beb4cf2da replace tinyvec with smallvec (#1866) 2021-01-01 23:18:25 +00:00
522c9a5ea6 update CoC text 2020-12-31 03:24:18 +00:00
102bb8f9ab update dot dep graphs 2020-12-29 00:22:28 +00:00
20b46cdaf9 format factory_tuple macro invocations (#1859) 2020-12-28 21:04:02 +00:00
2a2a20c3e7 bump msrv to 1.46 (#1858) 2020-12-28 00:44:15 +00:00
093d3a6c59 remove deprecated on_connect methods (#1857) 2020-12-27 23:23:30 +00:00
8c9ea43e23 address clippy warnings 2020-12-27 20:54:04 +00:00
f9fcf56d5c reduce branch in actix_http::h1::codec (#1854) 2020-12-27 20:37:53 +00:00
cbda928a33 Rename factory to handler (#1852) 2020-12-26 21:46:19 +00:00
1032f04ded remove unused actix_http::h1::OneRequest (#1853) 2020-12-26 12:46:36 +00:00
404 changed files with 51990 additions and 32803 deletions

.cargo/config.toml (Normal file, +17 lines)

@@ -0,0 +1,17 @@
[alias]
lint = "clippy --workspace --tests --examples --bins -- -Dclippy::todo"
lint-all = "clippy --workspace --all-features --tests --examples --bins -- -Dclippy::todo"
# lib checking
ci-check-min = "hack --workspace check --no-default-features"
ci-check-default = "hack --workspace check"
ci-check-default-tests = "check --workspace --tests"
ci-check-all-feature-powerset="hack --workspace --feature-powerset --skip=__compress,experimental-io-uring check"
ci-check-all-feature-powerset-linux="hack --workspace --feature-powerset --skip=__compress check"
# testing
ci-doctest-default = "test --workspace --doc --no-fail-fast -- --nocapture"
ci-doctest = "test --workspace --all-features --doc --no-fail-fast -- --nocapture"
# compile docs as docs.rs would
# RUSTDOCFLAGS="--cfg=docsrs" cargo +nightly doc --no-deps --workspace
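For context, the aliases above are invoked like ordinary Cargo subcommands. A minimal local usage sketch (an assumption, not part of this diff: the ci-check-* aliases expand to cargo-hack, so it must be installed first, as the CI workflow later in this diff also does):

# hypothetical local invocation of the aliases defined above
cargo install cargo-hack    # the ci-check-min/default/powerset aliases expand to `cargo hack ...`
cargo lint                  # clippy over the workspace, denying leftover `todo!` markers
cargo ci-check-min          # hack --workspace check --no-default-features
cargo ci-doctest            # all-features doc tests across the workspace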

.github/FUNDING.yml (vendored, Normal file, +3 lines)

@@ -0,0 +1,3 @@
# These are supported funding model platforms
github: [robjtede]


@@ -33,5 +33,5 @@ Please search on the [Actix Web issue tracker](https://github.com/actix/actix-we
 ## Your Environment
 <!--- Include as many relevant details about the environment you experienced the bug in -->
-* Rust Version (I.e, output of `rustc -V`):
-* Actix Web Version:
+- Rust Version (I.e, output of `rustc -V`):
+- Actix Web Version:


@@ -1,15 +1,8 @@
 blank_issues_enabled: true
 contact_links:
-  - name: GitHub Discussions
-    url: https://github.com/actix/actix-web/discussions
-    about: Actix Web Q&A
-  - name: Gitter chat (actix-web)
-    url: https://gitter.im/actix/actix-web
-    about: Actix Web Q&A
-  - name: Gitter chat (actix)
-    url: https://gitter.im/actix/actix
-    about: Actix (actor framework) Q&A
   - name: Actix Discord
     url: https://discord.gg/NWpN5mmg3x
     about: Actix developer discussion and community chat
+  - name: GitHub Discussions
+    url: https://github.com/actix/actix-web/discussions
+    about: Actix Web Q&A


@@ -1,21 +1,21 @@
 <!-- Thanks for considering contributing actix! -->
-<!-- Please fill out the following to make our reviews easy. -->
+<!-- Please fill out the following to get your PR reviewed quicker. -->
 ## PR Type
 <!-- What kind of change does this PR make? -->
 <!-- Bug Fix / Feature / Refactor / Code Style / Other -->
-INSERT_PR_TYPE
+PR_TYPE
 ## PR Checklist
-Check your PR fulfills the following:
+<!-- Check your PR fulfills the following items. -->
+<!-- For draft PRs check the boxes as you complete them. -->
 - [ ] Tests for the changes have been added / updated.
 - [ ] Documentation comments have been added / updated.
 - [ ] A changelog entry has been made for the appropriate packages.
-- [ ] Format code with the latest stable rustfmt
+- [ ] Format code with the latest stable rustfmt.
+- [ ] (Team) Label with affected crates and semver status.
 ## Overview


@ -1,8 +1,6 @@
name: Benchmark (Linux)
name: Benchmark
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master

185
.github/workflows/ci-post-merge.yml vendored Normal file

@ -0,0 +1,185 @@
name: CI (post-merge)
on:
push:
branches: [master]
jobs:
build_and_test_nightly:
strategy:
fail-fast: false
matrix:
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-2022, triple: x86_64-pc-windows-msvc }
version:
- nightly
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env:
CI: 1
CARGO_INCREMENTAL: 0
VCPKGRS_DYNAMIC: 1
steps:
- uses: actions/checkout@v2
# install OpenSSL on Windows
# TODO: GitHub actions docs state that OpenSSL is
# already installed on these Windows machines somewhere
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check minimal
uses: actions-rs/cargo@v1
with: { command: ci-check-min }
- name: check default
uses: actions-rs/cargo@v1
with: { command: ci-check-default }
- name: tests
timeout-minutes: 60
run: |
cargo test --lib --tests -p=actix-router --all-features
cargo test --lib --tests -p=actix-http --all-features
cargo test --lib --tests -p=actix-web --features=rustls,openssl -- --skip=test_reading_deflate_encoding_large_random_rustls
cargo test --lib --tests -p=actix-web-codegen --all-features
cargo test --lib --tests -p=awc --all-features
cargo test --lib --tests -p=actix-http-test --all-features
cargo test --lib --tests -p=actix-test --all-features
cargo test --lib --tests -p=actix-files
cargo test --lib --tests -p=actix-multipart --all-features
cargo test --lib --tests -p=actix-web-actors --all-features
- name: tests (io-uring)
if: matrix.target.os == 'ubuntu-latest'
timeout-minutes: 60
run: >
sudo bash -c "ulimit -Sl 512
&& ulimit -Hl 512
&& PATH=$PATH:/usr/share/rust/.cargo/bin
&& RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo test --lib --tests -p=actix-files --all-features"
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.6.3 --no-default-features --features ci-autoclean
cargo-cache
ci_feature_powerset_check:
name: Verify Feature Combinations
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install stable
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check feature combinations
uses: actions-rs/cargo@v1
with: { command: ci-check-all-feature-powerset }
- name: check feature combinations
uses: actions-rs/cargo@v1
with: { command: ci-check-all-feature-powerset-linux }
# job currently (1st Feb 2022) segfaults
# coverage:
# name: coverage
# runs-on: ubuntu-latest
# steps:
# - uses: actions/checkout@v2
# - name: Install stable
# uses: actions-rs/toolchain@v1
# with:
# toolchain: stable-x86_64-unknown-linux-gnu
# profile: minimal
# override: true
# - name: Generate Cargo.lock
# uses: actions-rs/cargo@v1
# with: { command: generate-lockfile }
# - name: Cache Dependencies
# uses: Swatinem/rust-cache@v1.2.0
# - name: Generate coverage file
# run: |
# cargo install cargo-tarpaulin --vers "^0.13"
# cargo tarpaulin --workspace --features=rustls,openssl --out Xml --verbose
# - name: Upload to Codecov
# uses: codecov/codecov-action@v1
# with: { file: cobertura.xml }
nextest:
name: nextest
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.3.0
- name: Install cargo-nextest
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-nextest
- name: Test with cargo-nextest
uses: actions-rs/cargo@v1
with:
command: nextest
args: run

120
.github/workflows/ci.yml vendored Normal file

@ -0,0 +1,120 @@
name: CI
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches: [master]
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
target:
- { name: Linux, os: ubuntu-latest, triple: x86_64-unknown-linux-gnu }
- { name: macOS, os: macos-latest, triple: x86_64-apple-darwin }
- { name: Windows, os: windows-2022, triple: x86_64-pc-windows-msvc }
version:
- 1.54.0 # MSRV
- stable
name: ${{ matrix.target.name }} / ${{ matrix.version }}
runs-on: ${{ matrix.target.os }}
env:
CI: 1
CARGO_INCREMENTAL: 0
VCPKGRS_DYNAMIC: 1
steps:
- uses: actions/checkout@v2
# install OpenSSL on Windows
# TODO: GitHub actions docs state that OpenSSL is
# already installed on these Windows machines somewhere
- name: Set vcpkg root
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: echo "VCPKG_ROOT=$env:VCPKG_INSTALLATION_ROOT" | Out-File -FilePath $env:GITHUB_ENV -Append
- name: Install OpenSSL
if: matrix.target.triple == 'x86_64-pc-windows-msvc'
run: vcpkg install openssl:x64-windows
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.target.triple }}
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Install cargo-hack
uses: actions-rs/cargo@v1
with:
command: install
args: cargo-hack
- name: check minimal
uses: actions-rs/cargo@v1
with: { command: ci-check-min }
- name: check default
uses: actions-rs/cargo@v1
with: { command: ci-check-default }
- name: tests
timeout-minutes: 60
run: |
cargo test --lib --tests -p=actix-router --all-features
cargo test --lib --tests -p=actix-http --all-features
cargo test --lib --tests -p=actix-web --features=rustls,openssl -- --skip=test_reading_deflate_encoding_large_random_rustls
cargo test --lib --tests -p=actix-web-codegen --all-features
cargo test --lib --tests -p=awc --all-features
cargo test --lib --tests -p=actix-http-test --all-features
cargo test --lib --tests -p=actix-test --all-features
cargo test --lib --tests -p=actix-files
cargo test --lib --tests -p=actix-multipart --all-features
cargo test --lib --tests -p=actix-web-actors --all-features
- name: tests (io-uring)
if: matrix.target.os == 'ubuntu-latest'
timeout-minutes: 60
run: >
sudo bash -c "ulimit -Sl 512
&& ulimit -Hl 512
&& PATH=$PATH:/usr/share/rust/.cargo/bin
&& RUSTUP_TOOLCHAIN=${{ matrix.version }} cargo test --lib --tests -p=actix-files --all-features"
- name: Clear the cargo caches
run: |
cargo install cargo-cache --version 0.6.3 --no-default-features --features ci-autoclean
cargo-cache
rustdoc:
name: doc tests
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust (nightly)
uses: actions-rs/toolchain@v1
with:
toolchain: nightly-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.3.0
- name: doc tests
uses: actions-rs/cargo@v1
timeout-minutes: 60
with: { command: ci-doctest }


@ -1,32 +1,66 @@
name: Lint
on:
pull_request:
types: [opened, synchronize, reopened]
name: Clippy and rustfmt Check
jobs:
clippy_check:
fmt:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: rustfmt
override: true
- name: Check with rustfmt
uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
- uses: actions-rs/toolchain@v1
clippy:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
toolchain: stable
profile: minimal
components: clippy
override: true
- name: Generate Cargo.lock
uses: actions-rs/cargo@v1
with: { command: generate-lockfile }
- name: Cache Dependencies
uses: Swatinem/rust-cache@v1.2.0
- name: Check with Clippy
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --all-features --all --tests
args: --workspace --tests --examples --all-features
lint-docs:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable
profile: minimal
components: rust-docs
- name: Check for broken intra-doc links
uses: actions-rs/cargo@v1
env:
RUSTDOCFLAGS: "-D warnings"
with:
command: doc
args: --no-deps --all-features --workspace


@ -1,69 +0,0 @@
name: CI (Linux)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- 1.42.0 # MSRV
- stable
- nightly
name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
- name: tests (actix-http)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=actix-http --no-default-features --features=rustls -- --nocapture
- name: tests (awc)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=awc --no-default-features --features=rustls -- --nocapture
- name: Generate coverage file
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
run: |
cargo install cargo-tarpaulin --vers "^0.13"
cargo tarpaulin --out Xml
- name: Upload to Codecov
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
uses: codecov/codecov-action@v1
with:
file: cobertura.xml


@ -1,44 +0,0 @@
name: CI (macOS)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-apple-darwin
runs-on: macOS-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-apple-darwin
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
--skip=test_h2_content_length
--skip=test_reading_deflate_encoding_large_random_rustls


@ -1,14 +1,12 @@
name: Upload documentation
name: Upload Documentation
on:
push:
branches:
- master
branches: [master]
jobs:
build:
runs-on: ubuntu-latest
if: github.repository == 'actix/actix-web'
steps:
- uses: actions/checkout@v2
@ -20,14 +18,14 @@ jobs:
profile: minimal
override: true
- name: check build
- name: Build Docs
uses: actions-rs/cargo@v1
with:
command: doc
args: --no-deps --workspace --all-features
args: --workspace --all-features --no-deps
- name: Tweak HTML
run: echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html
run: echo '<meta http-equiv="refresh" content="0;url=actix_web/index.html">' > target/doc/index.html
- name: Deploy to GitHub Pages
uses: JamesIves/github-pages-deploy-action@3.7.1


@ -1,64 +0,0 @@
name: CI (Windows)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
env:
VCPKGRS_DYNAMIC: 1
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-pc-windows-msvc
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-pc-windows-msvc
profile: minimal
override: true
- name: Install OpenSSL
run: |
vcpkg integrate install
vcpkg install openssl:x64-windows
Copy-Item C:\vcpkg\installed\x64-windows\bin\libcrypto-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libcrypto.dll
Copy-Item C:\vcpkg\installed\x64-windows\bin\libssl-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libssl.dll
Get-ChildItem C:\vcpkg\installed\x64-windows\bin
Get-ChildItem C:\vcpkg\installed\x64-windows\lib
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
--skip=test_h2_content_length
--skip=test_reading_deflate_encoding_large_random_rustls
--skip=test_params
--skip=test_simple
--skip=test_expect_continue
--skip=test_http10_keepalive
--skip=test_slow_request
--skip=test_connection_force_close
--skip=test_connection_server_close
--skip=test_connection_wait_queue_force_close

3
.gitignore vendored

@ -16,3 +16,6 @@ guide/build/
# Configuration directory generated by CLion
.idea
# Configuration directory generated by VSCode
.vscode

3
.prettierrc.json Normal file

@ -0,0 +1,3 @@
{
"proseWrap": "never"
}


@ -1,608 +1,5 @@
# Changes
# Changelog
## Unreleased - 2020-xx-xx
### Changed
* Bumped `rand` to `0.8`
Changelogs are kept separately for each crate in this repo.
### Fixed
* added the actual parsing error to `test::read_body_json` [#1812]
[#1812]: https://github.com/actix/actix-web/pull/1812
## 3.3.2 - 2020-12-01
### Fixed
* Removed an occasional `unwrap` on `None` panic in `NormalizePathNormalization`. [#1762]
* Fix `match_pattern()` returning `None` for scope with empty path resource. [#1798]
* Increase minimum `socket2` version. [#1803]
[#1762]: https://github.com/actix/actix-web/pull/1762
[#1798]: https://github.com/actix/actix-web/pull/1798
[#1803]: https://github.com/actix/actix-web/pull/1803
## 3.3.1 - 2020-11-29
* Ensure `actix-http` dependency uses same `serde_urlencoded`.
## 3.3.0 - 2020-11-25
### Added
* Add `Either<A, B>` extractor helper. [#1788]
### Changed
* Upgrade `serde_urlencoded` to `0.7`. [#1773]
[#1773]: https://github.com/actix/actix-web/pull/1773
[#1788]: https://github.com/actix/actix-web/pull/1788
## 3.2.0 - 2020-10-30
### Added
* Implement `exclude_regex` for Logger middleware. [#1723]
* Add request-local data extractor `web::ReqData`. [#1748]
* Add ability to register closure for request middleware logging. [#1749]
* Add `app_data` to `ServiceConfig`. [#1757]
* Expose `on_connect` for access to the connection stream before request is handled. [#1754]
### Changed
* Updated actix-web-codegen dependency for access to new `#[route(...)]` multi-method macro.
* Print non-configured `Data<T>` type when attempting extraction. [#1743]
* Re-export bytes::Buf{Mut} in web module. [#1750]
* Upgrade `pin-project` to `1.0`.
[#1723]: https://github.com/actix/actix-web/pull/1723
[#1743]: https://github.com/actix/actix-web/pull/1743
[#1748]: https://github.com/actix/actix-web/pull/1748
[#1750]: https://github.com/actix/actix-web/pull/1750
[#1754]: https://github.com/actix/actix-web/pull/1754
[#1749]: https://github.com/actix/actix-web/pull/1749
## 3.1.0 - 2020-09-29
### Changed
* Add `TrailingSlash::MergeOnly` behaviour to `NormalizePath`, which allows `NormalizePath`
to retain any trailing slashes. [#1695]
* Remove bound `std::marker::Sized` from `web::Data` to support storing `Arc<dyn Trait>`
via `web::Data::from` [#1710]
### Fixed
* `ResourceMap` debug printing is no longer infinitely recursive. [#1708]
[#1695]: https://github.com/actix/actix-web/pull/1695
[#1708]: https://github.com/actix/actix-web/pull/1708
[#1710]: https://github.com/actix/actix-web/pull/1710
## 3.0.2 - 2020-09-15
### Fixed
* `NormalizePath` when used with `TrailingSlash::Trim` no longer trims the root path "/". [#1678]
[#1678]: https://github.com/actix/actix-web/pull/1678
## 3.0.1 - 2020-09-13
### Changed
* `middleware::normalize::TrailingSlash` enum is now accessible. [#1673]
[#1673]: https://github.com/actix/actix-web/pull/1673
## 3.0.0 - 2020-09-11
* No significant changes from `3.0.0-beta.4`.
## 3.0.0-beta.4 - 2020-09-09
### Added
* `middleware::NormalizePath` now has configurable behaviour for either always having a trailing
slash, or as the new addition, always trimming trailing slashes. [#1639]
### Changed
* Update actix-codec and actix-utils dependencies. [#1634]
* `FormConfig` and `JsonConfig` configurations are now also considered when set
using `App::data`. [#1641]
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`. [#1655]
* `HttpServer::maxconnrate` is renamed to the more expressive
`HttpServer::max_connection_rate`. [#1655]
[#1639]: https://github.com/actix/actix-web/pull/1639
[#1641]: https://github.com/actix/actix-web/pull/1641
[#1634]: https://github.com/actix/actix-web/pull/1634
[#1655]: https://github.com/actix/actix-web/pull/1655
## 3.0.0-beta.3 - 2020-08-17
### Changed
* Update `rustls` to 0.18
## 3.0.0-beta.2 - 2020-08-17
### Changed
* `PayloadConfig` is now also considered in `Bytes` and `String` extractors when set
using `App::data`. [#1610]
* `web::Path` now has a public representation: `web::Path(pub T)` that enables
destructuring. [#1594]
* `ServiceRequest::app_data` allows retrieval of non-Data data without splitting into parts to
access `HttpRequest` which already allows this. [#1618]
* Re-export all error types from `awc`. [#1621]
* MSRV is now 1.42.0.
### Fixed
* Memory leak of app data in pooled requests. [#1609]
[#1594]: https://github.com/actix/actix-web/pull/1594
[#1609]: https://github.com/actix/actix-web/pull/1609
[#1610]: https://github.com/actix/actix-web/pull/1610
[#1618]: https://github.com/actix/actix-web/pull/1618
[#1621]: https://github.com/actix/actix-web/pull/1621
## 3.0.0-beta.1 - 2020-07-13
### Added
* Re-export `actix_rt::main` as `actix_web::main`.
* `HttpRequest::match_pattern` and `ServiceRequest::match_pattern` for extracting the matched
resource pattern.
* `HttpRequest::match_name` and `ServiceRequest::match_name` for extracting matched resource name.
### Changed
* Fix actix_http::h1::dispatcher so it returns when HW_BUFFER_SIZE is reached. Should reduce peak memory consumption during large uploads. [#1550]
* Migrate cookie handling to `cookie` crate. Actix-web no longer requires `ring` dependency.
* MSRV is now 1.41.1
### Fixed
* `NormalizePath` improved consistency when path needs slashes added _and_ removed.
## 3.0.0-alpha.3 - 2020-05-21
### Added
* Add option to create `Data<T>` from `Arc<T>` [#1509]
### Changed
* Resources and Scopes can now access non-overridden data types set on App (or containing scopes) when setting their own data. [#1486]
* Fix audit issue logging by default peer address [#1485]
* Bump minimum supported Rust version to 1.40
* Replace deprecated `net2` crate with `socket2`
[#1485]: https://github.com/actix/actix-web/pull/1485
[#1509]: https://github.com/actix/actix-web/pull/1509
## [3.0.0-alpha.2] - 2020-05-08
### Changed
* `{Resource,Scope}::default_service(f)` handlers now support app data extraction. [#1452]
* Implement `std::error::Error` for our custom errors [#1422]
* NormalizePath middleware now appends trailing / so that routes of form /example/ respond to /example requests. [#1433]
* Remove the `failure` feature and support.
[#1422]: https://github.com/actix/actix-web/pull/1422
[#1433]: https://github.com/actix/actix-web/pull/1433
[#1452]: https://github.com/actix/actix-web/pull/1452
[#1486]: https://github.com/actix/actix-web/pull/1486
## [3.0.0-alpha.1] - 2020-03-11
### Added
* Add helper function for creating routes with `TRACE` method guard `web::trace()`
* Add convenience functions `test::read_body_json()` and `test::TestRequest::send_request()` for testing.
### Changed
* Use `sha-1` crate instead of unmaintained `sha1` crate
* Skip empty chunks when returning response from a `Stream` [#1308]
* Update the `time` dependency to 0.2.7
* Update `actix-tls` dependency to 2.0.0-alpha.1
* Update `rustls` dependency to 0.17
[#1308]: https://github.com/actix/actix-web/pull/1308
## [2.0.0] - 2019-12-25
### Changed
* Rename `HttpServer::start()` to `HttpServer::run()`
* Allow to gracefully stop test server via `TestServer::stop()`
* Allow to specify multi-patterns for resources
## [2.0.0-rc] - 2019-12-20
### Changed
* Move `BodyEncoding` to `dev` module #1220
* Allow to set `peer_addr` for TestRequest #1074
* Make web::Data deref to Arc<T> #1214
* Rename `App::register_data()` to `App::app_data()`
* `HttpRequest::app_data<T>()` returns `Option<&T>` instead of `Option<&Data<T>>`
### Fixed
* Fix `AppConfig::secure()` is always false. #1202
## [2.0.0-alpha.6] - 2019-12-15
### Fixed
* Fixed compilation with default features off
## [2.0.0-alpha.5] - 2019-12-13
### Added
* Add test server, `test::start()` and `test::start_with()`
## [2.0.0-alpha.4] - 2019-12-08
### Deleted
* Delete HttpServer::run(), it is not useful with async/await
## [2.0.0-alpha.3] - 2019-12-07
### Changed
* Migrate to tokio 0.2
## [2.0.0-alpha.1] - 2019-11-22
### Changed
* Migrated to `std::future`
* Remove implementation of `Responder` for `()`. (#1167)
## [1.0.9] - 2019-11-14
### Added
* Add `Payload::into_inner` method and make stored `def::Payload` public. (#1110)
### Changed
* Support `Host` guards when the `Host` header is unset (e.g. HTTP/2 requests) (#1129)
## [1.0.8] - 2019-09-25
### Added
* Add `Scope::register_data` and `Resource::register_data` methods, parallel to
`App::register_data`.
* Add `middleware::Condition` that conditionally enables another middleware
* Allow to re-construct `ServiceRequest` from `HttpRequest` and `Payload`
* Add `HttpServer::listen_uds` for ability to listen on UDS FD rather than path,
which is useful for example with systemd.
### Changed
* Make UrlEncodedError::Overflow more informative
* Use actix-testing for testing utils
## [1.0.7] - 2019-08-29
### Fixed
* Request Extensions leak #1062
## [1.0.6] - 2019-08-28
### Added
* Re-implement Host predicate (#989)
* Form implements Responder, returning a `application/x-www-form-urlencoded` response
* Add `into_inner` to `Data`
* Add `test::TestRequest::set_form()` convenience method to automatically serialize data and set
the header in test requests.
### Changed
* `Query` payload made `pub`. Allows user to pattern-match the payload.
* Enable `rust-tls` feature for client #1045
* Update serde_urlencoded to 0.6.1
* Update url to 2.1
## [1.0.5] - 2019-07-18
### Added
* Unix domain sockets (HttpServer::bind_uds) #92
* Actix now logs errors resulting in "internal server error" responses always, with the `error`
logging level
### Fixed
* Restored logging of errors through the `Logger` middleware
## [1.0.4] - 2019-07-17
### Added
* Add `Responder` impl for `(T, StatusCode) where T: Responder`
* Allow to access app's resource map via
`ServiceRequest::resource_map()` and `HttpRequest::resource_map()` methods.
### Changed
* Upgrade `rand` dependency version to 0.7
## [1.0.3] - 2019-06-28
### Added
* Support asynchronous data factories #850
### Changed
* Use `encoding_rs` crate instead of unmaintained `encoding` crate
## [1.0.2] - 2019-06-17
### Changed
* Move cors middleware to `actix-cors` crate.
* Move identity middleware to `actix-identity` crate.
## [1.0.1] - 2019-06-17
### Added
* Add support for PathConfig #903
* Add `middleware::identity::RequestIdentity` trait to `get_identity` from `HttpMessage`.
### Changed
* Move cors middleware to `actix-cors` crate.
* Move identity middleware to `actix-identity` crate.
* Disable default feature `secure-cookies`.
* Allow to test an app that uses async actors #897
* Re-apply patch from #637 #894
### Fixed
* HttpRequest::url_for is broken with nested scopes #915
## [1.0.0] - 2019-06-05
### Added
* Add `Scope::configure()` method.
* Add `ServiceRequest::set_payload()` method.
* Add `test::TestRequest::set_json()` convenience method to automatically
serialize data and set header in test requests.
* Add macros for head, options, trace, connect and patch http methods
### Changed
* Drop an unnecessary `Option<_>` indirection around `ServerBuilder` from `HttpServer`. #863
### Fixed
* Fix Logger request time format, and use rfc3339. #867
* Clear http requests pool on app service drop #860
## [1.0.0-rc] - 2019-05-18
### Add
* Add `Query<T>::from_query()` to extract parameters from a query string. #846
* `QueryConfig`, similar to `JsonConfig` for customizing error handling of query extractors.
### Changed
* `JsonConfig` is now `Send + Sync`, this implies that `error_handler` must be `Send + Sync` too.
### Fixed
* Codegen with parameters in the path only resolves the first registered endpoint #841
## [1.0.0-beta.4] - 2019-05-12
### Add
* Allow to set/override app data on scope level
### Changed
* `App::configure` take an `FnOnce` instead of `Fn`
* Upgrade actix-net crates
## [1.0.0-beta.3] - 2019-05-04
### Added
* Add helper function for executing futures `test::block_fn()`
### Changed
* Extractor configuration could be registered with `App::data()`
or with `Resource::data()` #775
* Route data is unified with app data, `Route::data()` moved to resource
level to `Resource::data()`
* CORS handling without headers #702
* Allow to construct `Data` instances to avoid double `Arc` for `Send + Sync` types.
### Fixed
* Fix `NormalizePath` middleware impl #806
### Deleted
* `App::data_factory()` is deleted.
## [1.0.0-beta.2] - 2019-04-24
### Added
* Add raw services support via `web::service()`
* Add helper functions for reading response body `test::read_body()`
* Add support for `remainder match` (i.e "/path/{tail}*")
* Extend `Responder` trait, allow to override status code and headers.
* Store visit and login timestamp in the identity cookie #502
### Changed
* `.to_async()` handler can return `Responder` type #792
### Fixed
* Fix async web::Data factory handling
## [1.0.0-beta.1] - 2019-04-20
### Added
* Add helper functions for reading test response body,
`test::read_response()` and `test::read_response_json()`
* Add `.peer_addr()` #744
* Add `NormalizePath` middleware
### Changed
* Rename `RouterConfig` to `ServiceConfig`
* Rename `test::call_success` to `test::call_service`
* Removed `ServiceRequest::from_parts()` as it is unsafe to create from parts.
* `CookieIdentityPolicy::max_age()` accepts value in seconds
### Fixed
* Fixed `TestRequest::app_data()`
## [1.0.0-alpha.6] - 2019-04-14
### Changed
* Allow to use any service as default service.
* Remove generic type for request payload, always use default.
* Removed `Decompress` middleware. Bytes, String, Json, Form extractors
automatically decompress payload.
* Make extractor config type explicit. Add `FromRequest::Config` associated type.
## [1.0.0-alpha.5] - 2019-04-12
### Added
* Added async io `TestBuffer` for testing.
### Deleted
* Removed native-tls support
## [1.0.0-alpha.4] - 2019-04-08
### Added
* `App::configure()` allow to offload app configuration to different methods
* Added `URLPath` option for logger
* Added `ServiceRequest::app_data()`, returns `Data<T>`
* Added `ServiceFromRequest::app_data()`, returns `Data<T>`
### Changed
* `FromRequest` trait refactoring
* Move multipart support to actix-multipart crate
### Fixed
* Fix body propagation in Response::from_error. #760
## [1.0.0-alpha.3] - 2019-04-02
### Changed
* Renamed `TestRequest::to_service()` to `TestRequest::to_srv_request()`
* Renamed `TestRequest::to_response()` to `TestRequest::to_srv_response()`
* Removed `Deref` impls
### Removed
* Removed unused `actix_web::web::md()`
## [1.0.0-alpha.2] - 2019-03-29
### Added
* rustls support
### Changed
* use forked cookie
* multipart::Field renamed to MultipartField
## [1.0.0-alpha.1] - 2019-03-28
### Changed
* Complete architecture re-design.
* Return 405 response if no matching route found within resource #538
Actix Web changelog [is now here &rarr;](./actix-web/CHANGES.md).


@ -8,19 +8,19 @@ In the interest of fostering an open and welcoming environment, we as contributo
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
- Using welcoming and inclusive language
- Being respectful of differing viewpoints and experiences
- Gracefully accepting constructive criticism
- Focusing on what is best for the community
- Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
- The use of sexualized language or imagery and unwelcome sexual attention or advances
- Trolling, insulting/derogatory comments, and personal or political attacks
- Public or private harassment
- Publishing others' private information, such as a physical or electronic address, without explicit permission
- Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities


@ -1,123 +1,21 @@
[package]
name = "actix-web"
version = "3.3.2"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust"
readme = "README.md"
keywords = ["actix", "http", "web", "framework", "async"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-web/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
license = "MIT OR Apache-2.0"
edition = "2018"
[package.metadata.docs.rs]
features = ["openssl", "rustls", "compress", "secure-cookies"]
[badges]
travis-ci = { repository = "actix/actix-web", branch = "master" }
codecov = { repository = "actix/actix-web", branch = "master", service = "github" }
[lib]
name = "actix_web"
path = "src/lib.rs"
[workspace]
resolver = "2"
members = [
".",
"awc",
"actix-http",
"actix-files",
"actix-multipart",
"actix-web-actors",
"actix-web-codegen",
"actix-http-test",
"actix-files",
"actix-http-test",
"actix-http",
"actix-multipart",
"actix-router",
"actix-test",
"actix-web-actors",
"actix-web-codegen",
"actix-web",
"awc",
]
[features]
default = ["compress"]
# content-encoding support
compress = ["actix-http/compress", "awc/compress"]
# sessions feature
secure-cookies = ["actix-http/secure-cookies"]
# openssl
openssl = ["actix-tls/openssl", "awc/openssl", "open-ssl"]
# rustls
rustls = ["actix-tls/rustls", "awc/rustls", "rust-tls"]
[[example]]
name = "basic"
required-features = ["compress"]
[[example]]
name = "uds"
required-features = ["compress"]
[[test]]
name = "test_server"
required-features = ["compress"]
[[example]]
name = "on_connect"
required-features = []
[[example]]
name = "client"
required-features = ["rustls"]
[dependencies]
actix-codec = "0.3.0"
actix-service = "1.0.6"
actix-utils = "2.0.0"
actix-router = "0.2.4"
actix-rt = "1.1.1"
actix-server = "1.0.0"
actix-testing = "1.0.0"
actix-macros = "0.1.0"
actix-threadpool = "0.3.1"
actix-tls = "2.0.0"
actix-web-codegen = "0.4.0"
actix-http = "2.2.0"
awc = { version = "2.0.3", default-features = false }
bytes = "0.5.3"
derive_more = "0.99.5"
encoding_rs = "0.8"
futures-channel = { version = "0.3.5", default-features = false }
futures-core = { version = "0.3.5", default-features = false }
futures-util = { version = "0.3.5", default-features = false }
fxhash = "0.2.1"
log = "0.4"
mime = "0.3"
socket2 = "0.3.16"
pin-project = "1.0.0"
regex = "1.4"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_urlencoded = "0.7"
time = { version = "0.2.7", default-features = false, features = ["std"] }
url = "2.1"
open-ssl = { package = "openssl", version = "0.10", optional = true }
rust-tls = { package = "rustls", version = "0.18.0", optional = true }
tinyvec = { version = "1", features = ["alloc"] }
[dev-dependencies]
actix = "0.10.0"
actix-http = { version = "2.1.0", features = ["actors"] }
rand = "0.8"
env_logger = "0.8"
serde_derive = "1.0"
brotli2 = "0.3.2"
flate2 = "1.0.13"
criterion = "0.3"
[profile.dev]
# Disabling debug info speeds up builds a bunch and we don't rely on it for debugging that much.
debug = 0
[profile.release]
lto = true
@ -125,18 +23,22 @@ opt-level = 3
codegen-units = 1
[patch.crates-io]
actix-web = { path = "." }
actix-files = { path = "actix-files" }
actix-http = { path = "actix-http" }
actix-http-test = { path = "actix-http-test" }
actix-web-codegen = { path = "actix-web-codegen" }
actix-multipart = { path = "actix-multipart" }
actix-files = { path = "actix-files" }
actix-router = { path = "actix-router" }
actix-test = { path = "actix-test" }
actix-web = { path = "actix-web" }
actix-web-actors = { path = "actix-web-actors" }
actix-web-codegen = { path = "actix-web-codegen" }
awc = { path = "awc" }
[[bench]]
name = "server"
harness = false
[[bench]]
name = "service"
harness = false
# uncomment for quick testing against local actix-net repo
# actix-service = { path = "../actix-net/actix-service" }
# actix-macros = { path = "../actix-net/actix-macros" }
# actix-rt = { path = "../actix-net/actix-rt" }
# actix-codec = { path = "../actix-net/actix-codec" }
# actix-utils = { path = "../actix-net/actix-utils" }
# actix-tls = { path = "../actix-net/actix-tls" }
# actix-server = { path = "../actix-net/actix-server" }


@ -1,4 +1,4 @@
Copyright (c) 2017 Actix Team
Copyright (c) 2017-NOW Actix Team
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated


@ -1,652 +0,0 @@
## Unreleased
## 3.0.0
* The return type for `ServiceRequest::app_data::<T>()` was changed from returning a `Data<T>` to
simply a `T`. To access a `Data<T>` use `ServiceRequest::app_data::<Data<T>>()`.
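  For illustration, a rough sketch of both access patterns in 3.0 (the `Config` type here is hypothetical):
  ```rust
  use actix_web::{dev::ServiceRequest, web::Data};

  struct Config {
      name: &'static str,
  }

  fn inspect(req: &ServiceRequest) {
      // a plain `T` registered with `App::app_data` is returned directly
      let _config: Option<&Config> = req.app_data::<Config>();

      // to get the `Data` wrapper itself, ask for `Data<T>` explicitly
      let _data: Option<&Data<Config>> = req.app_data::<Data<Config>>();
  }
  ```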
* Cookie handling has been offloaded to the `cookie` crate:
* `USERINFO_ENCODE_SET` is no longer exposed. Percent-encoding is still supported; check docs.
* Some types now require lifetime parameters.
* The `time` crate was updated to `v0.2`, a major breaking change which affects any `actix-web` method previously expecting a time v0.1 input.
* Explicitly setting a cookie's SameSite property to `SameSite::None` will now result in `SameSite=None` being sent with the response Set-Cookie header. To create a cookie without a SameSite attribute, remove any calls setting same_site.
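  For example, a minimal sketch of the new behaviour (the cookie name and value are placeholders):
  ```rust
  use actix_web::{
      cookie::{Cookie, SameSite},
      HttpResponse,
  };

  // explicitly setting SameSite::None now emits `SameSite=None` in the Set-Cookie header
  let explicit = Cookie::build("session", "abc123")
      .same_site(SameSite::None)
      .finish();

  // omitting `same_site` produces a cookie without any SameSite attribute
  let unspecified = Cookie::build("session", "abc123").finish();

  let _res = HttpResponse::Ok().cookie(explicit).cookie(unspecified).finish();
  ```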
* actix-http support for Actors messages was moved to the `actix-http` crate and is enabled with the `actors` feature.
* The `content_length` function was removed from `actix-http`. Set Content-Length by setting the response body as normal, or by calling the `no_chunking` function.
* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
`u64` instead of a `usize`.
* Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`s elements now needs to use
destructuring or `.into_inner()`. For example:
```rust
// Previously:
async fn some_route(path: web::Path<(String, String)>) -> String {
format!("Hello, {} {}", path.0, path.1)
}
// Now (this also worked before):
async fn some_route(path: web::Path<(String, String)>) -> String {
let (first_name, last_name) = path.into_inner();
format!("Hello, {} {}", first_name, last_name)
}
// Or (this wasn't previously supported):
async fn some_route(web::Path((first_name, last_name)): web::Path<(String, String)>) -> String {
format!("Hello, {} {}", first_name, last_name)
}
```
* `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one.
It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`,
or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::new(TrailingSlash::MergeOnly))`.
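  A minimal sketch of the new constructor (assuming a release where `TrailingSlash::MergeOnly` is available and the enum is exposed at `middleware::normalize::TrailingSlash`):
  ```rust
  use actix_web::{
      middleware::{normalize::TrailingSlash, NormalizePath},
      App,
  };

  // merge duplicate slashes but leave trailing slashes untouched
  let app = App::new().wrap(NormalizePath::new(TrailingSlash::MergeOnly));
  ```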
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`.
* `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
## 2.0.0
* `HttpServer::start()` renamed to `HttpServer::run()`. It is also possible to `.await` the result of `run`; in that case, it resolves when the server exits.
* `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`.
Stored data is available via `HttpRequest::app_data()` method at runtime.
* Extractor configuration must be registered with `App::app_data()` instead of `App::data()`
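  A rough sketch of both registrations (assuming actix-web 2.x with actix-rt available for the `#[actix_rt::main]` macro; the `AppState` type and bind address are placeholders):
  ```rust
  use actix_web::{web, App, HttpServer};

  struct AppState {
      greeting: &'static str,
  }

  #[actix_rt::main]
  async fn main() -> std::io::Result<()> {
      HttpServer::new(|| {
          App::new()
              // extractor configuration is now registered with `app_data`
              .app_data(web::JsonConfig::default().limit(4096))
              // any `T: 'static` value can be stored and read back at runtime
              // with `HttpRequest::app_data::<AppState>()`
              .app_data(AppState { greeting: "hello" })
      })
      .bind("127.0.0.1:8080")?
      .run()
      .await
  }
  ```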
* Sync handlers have been removed. The `.to_async()` method has been renamed to `.to()`; replace `fn` with `async fn` to convert a sync handler to async.
* `actix_http_test::TestServer` moved to the `actix_web::test` module. To start a test server, use the `test::start()` or `test_start_with_config()` methods.
* The `ResponseError` trait has been refactored. `ResponseError::error_response()` now renders the HTTP response.
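  A minimal sketch of a custom error under the refactored trait (the `MyError` type is hypothetical):
  ```rust
  use std::fmt;

  use actix_web::{HttpResponse, ResponseError};

  #[derive(Debug)]
  struct MyError;

  impl fmt::Display for MyError {
      fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
          f.write_str("my error")
      }
  }

  impl ResponseError for MyError {
      // error_response() now renders the complete HTTP response
      fn error_response(&self) -> HttpResponse {
          HttpResponse::InternalServerError().body("something went wrong")
      }
  }
  ```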
* Feature `rust-tls` renamed to `rustls`
instead of
```rust
actix-web = { version = "2.0.0", features = ["rust-tls"] }
```
use
```rust
actix-web = { version = "2.0.0", features = ["rustls"] }
```
* Feature `ssl` renamed to `openssl`
instead of
```rust
actix-web = { version = "2.0.0", features = ["ssl"] }
```
use
```rust
actix-web = { version = "2.0.0", features = ["openssl"] }
```
* `Cors` builder now requires that you call `.finish()` to construct the middleware
## 1.0.1
* Cors middleware has been moved to `actix-cors` crate
instead of
```rust
use actix_web::middleware::cors::Cors;
```
use
```rust
use actix_cors::Cors;
```
* Identity middleware has been moved to `actix-identity` crate
instead of
```rust
use actix_web::middleware::identity::{Identity, CookieIdentityPolicy, IdentityService};
```
use
```rust
use actix_identity::{Identity, CookieIdentityPolicy, IdentityService};
```
## 1.0.0
* Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration
instead of
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Config = ExtractorConfig;
type Result = Result<YourExtractor, Error>;
fn from_request(req: &HttpRequest, cfg: &Self::Config) -> Self::Result {
println!("use the config: {:?}", cfg.config);
...
}
}
App::new().resource("/route_with_config", |r| {
r.post().with_config(handler_fn, |cfg| {
cfg.0.config = "test".to_string();
})
})
```
use the `HttpRequest` to get the configuration like any other `Data` with `req.app_data::<C>()`, and set it with the `data()` method on the `resource`:
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Error = Error;
type Future = Result<Self, Self::Error>;
type Config = ExtractorConfig;
fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
let cfg = req.app_data::<ExtractorConfig>();
println!("config data?: {:?}", cfg.unwrap().config);
...
}
}
App::new().service(
resource("/route_with_config")
.data(ExtractorConfig {
config: "test".to_string(),
})
.route(post().to(handler_fn)),
)
```
* Resource registration. Version 1.0 uses generalized resource registration via the `.service()` method.
instead of
```rust
App::new().resource("/welcome", |r| r.f(welcome))
```
use App's or Scope's `.service()` method. The `.service()` method accepts an object that implements the `HttpServiceFactory` trait. By default, actix-web provides `Resource` and `Scope` services.
```rust
App::new().service(
    web::resource("/welcome")
        .route(web::get().to(welcome))
        .route(web::post().to(post_handler)),
);
```
* Scope registration.
instead of
```rust
let app = App::new().scope("/{project_id}", |scope| {
scope
.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path2", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path3", |r| r.f(|_| HttpResponse::MethodNotAllowed()))
});
```
use `.service()` for registration and `web::scope()` as scope object factory.
```rust
let app = App::new().service(
web::scope("/{project_id}")
.service(web::resource("/path1").to(|| HttpResponse::Ok()))
.service(web::resource("/path2").to(|| HttpResponse::Ok()))
.service(web::resource("/path3").to(|| HttpResponse::MethodNotAllowed()))
);
```
* `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
instead of
```rust
App::new().resource("/welcome", |r| r.with(welcome))
```
use `.to()` or `.to_async()` methods
```rust
App::new().service(web::resource("/welcome").to(welcome))
```
* Passing arguments to handler with extractors, multiple arguments are allowed
instead of
```rust
fn welcome((body, req): (Bytes, HttpRequest)) -> ... {
...
}
```
use multiple arguments
```rust
fn welcome(body: Bytes, req: HttpRequest) -> ... {
...
}
```
* `.f()`, `.a()` and `.h()` handler registration methods have been removed.
Use `.to()` for handlers and `.to_async()` for async handlers. Handler function
must use extractors.
instead of
```rust
App::new().resource("/welcome", |r| r.f(welcome))
```
use App's `to()` or `to_async()` methods
```rust
App::new().service(web::resource("/welcome").to(welcome))
```
* `HttpRequest` does not provide access to request's payload stream.
instead of
```rust
fn index(req: &HttpRequest) -> Box<Future<Item=HttpResponse, Error=Error>> {
req
.payload()
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
.responder()
}
```
use `Payload` extractor
```rust
fn index(stream: web::Payload) -> impl Future<Item=HttpResponse, Error=Error> {
stream
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
}
```
* `State` is now `Data`. You register Data during the App initialization process
and then access it from handlers either using a Data extractor or using
HttpRequest's api.
instead of
```rust
App::with_state(T)
```
use App's `data` method
```rust
App::new()
.data(T)
```
and either use the Data extractor within your handler
```rust
use actix_web::web::Data;
fn endpoint_handler(data: Data<T>) {
...
}
```
.. or access your Data element from the HttpRequest
```rust
fn endpoint_handler(req: HttpRequest) {
let data: Option<Data<T>> = req.app_data::<T>();
}
```
* AsyncResponder is removed, use `.to_async()` registration method and `impl Future<>` as result type.
instead of
```rust
use actix_web::AsyncResponder;
fn endpoint_handler(...) -> impl Future<Item=HttpResponse, Error=Error>{
...
.responder()
}
```
.. simply omit AsyncResponder and the corresponding responder() finish method
* Middleware
instead of
```rust
let app = App::new()
.middleware(middleware::Logger::default())
```
use `.wrap()` method
```rust
let app = App::new()
.wrap(middleware::Logger::default())
.route("/index.html", web::get().to(index));
```
* `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
methods have been removed. Use the `Bytes`, `String`, `Form`, `Json`, and `Multipart` extractors instead.
instead of
```rust
fn index(req: &HttpRequest) -> Responder {
req.body()
.and_then(|body| {
...
})
}
```
use
```rust
fn index(body: Bytes) -> Responder {
...
}
```
* The `actix_web::server` module has been removed. To start an HTTP server, use the `actix_web::HttpServer` type.
* StaticFiles and NamedFile have been moved to a separate crate.
instead of `use actix_web::fs::StaticFiles`
use `use actix_files::Files`
instead of `use actix_web::fs::NamedFile`
use `use actix_files::NamedFile`
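  For example, a rough sketch (the directory and file paths are placeholders):
  ```rust
  use actix_files::{Files, NamedFile};
  use actix_web::App;

  // serve ./static under the /static URL prefix, with directory listings enabled
  let app = App::new().service(Files::new("/static", "./static").show_files_listing());

  // NamedFile still implements `Responder`, so it can be returned from handlers
  let index = NamedFile::open("./static/index.html");
  ```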
* Multipart has been moved to a separate crate.
instead of `use actix_web::multipart::Multipart`
use `use actix_multipart::Multipart`
* Response compression is not enabled by default.
To enable, use `Compress` middleware, `App::new().wrap(Compress::default())`.
* Session middleware moved to actix-session crate
* Actors support have been moved to `actix-web-actors` crate
* Custom Error
Instead of the `error_response` method alone, `ResponseError` now provides two methods: `error_response` and `render_response`. `error_response` creates the error response, and `render_response` returns the error response to the caller.
The simplest migration from 0.7 to 1.0 adds the following method to the custom implementation of `ResponseError`:
```rust
fn render_response(&self) -> HttpResponse {
self.error_response()
}
```
## 0.7.15
* The `' '` character is not percent decoded anymore before matching routes. If you need to use it in
your routes, you should use `%20`.
instead of
```rust
fn main() {
let app = App::new().resource("/my index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/my%20index", |r| {
r.method(http::Method::GET)
.with(index);
});
}
```
* If you used `AsyncResult::async` you need to replace it with `AsyncResult::future`
## 0.7.4
* `Route::with_config()`/`Route::with_async_config()` always pass configuration objects as a tuple, even for handlers with one parameter.
## 0.7
* `HttpRequest` does not implement `Stream` anymore. If you need to read the request payload, use the `HttpMessage::payload()` method.
instead of
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.from_err()
.fold(...)
....
}
```
use `.payload()`
```rust
fn index(req: HttpRequest) -> impl Responder {
req
.payload() // <- get request payload stream
.from_err()
.fold(...)
....
}
```
* [Middleware](https://actix.rs/actix-web/actix_web/middleware/trait.Middleware.html)
trait uses `&HttpRequest` instead of `&mut HttpRequest`.
* Removed `Route::with2()` and `Route::with3()`; use a tuple of extractors instead.
instead of
```rust
fn index(query: Query<..>, info: Json<MyStruct>) -> impl Responder {}
```
use tuple of extractors and use `.with()` for registration:
```rust
fn index((query, json): (Query<..>, Json<MyStruct>)) -> impl Responder {}
```
* `Handler::handle()` uses `&self` instead of `&mut self`
* `Handler::handle()` accepts reference to `HttpRequest<_>` instead of value
* Removed deprecated `HttpServer::threads()`, use
[HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead.
* Renamed `client::ClientConnectorError::Connector` to
`client::ClientConnectorError::Resolver`
* `Route::with()` does not return `ExtractorConfig`; to configure an extractor, use `Route::with_config()`
instead of
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with(index)
.limit(4096); // <- limit size of the payload
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
.with_config(index, |cfg| { // <- register handler
cfg.limit(4096); // <- limit size of the payload
})
});
}
```
* `Route::with_async()` does not return `ExtractorConfig`; to configure an extractor, use `Route::with_async_config()`
## 0.6
* The `Path<T>` extractor returns `ErrorNotFound` on failure instead of `ErrorBadRequest`
* `ws::Message::Close` now includes optional close reason.
`ws::CloseCode::Status` and `ws::CloseCode::Empty` have been removed.
* `HttpServer::threads()` renamed to `HttpServer::workers()`.
* `HttpServer::start_ssl()` and `HttpServer::start_tls()` deprecated.
Use `HttpServer::bind_ssl()` and `HttpServer::bind_tls()` instead.
* `HttpRequest::extensions()` returns a read-only reference to the request's extensions; `HttpRequest::extensions_mut()` returns a mutable reference.
* Instead of
`use actix_web::middleware::{CookieSessionBackend, CookieSessionError, RequestSession, Session, SessionBackend, SessionImpl, SessionStorage};`
use the `actix_web::middleware::session` module:
`use actix_web::middleware::session::{CookieSessionBackend, CookieSessionError, RequestSession, Session, SessionBackend, SessionImpl, SessionStorage};`
* `FromRequest::from_request()` accepts mutable reference to a request
* `FromRequest::Result` has to implement `Into<Reply<Self>>`
* [`Responder::respond_to()`](
https://actix.rs/actix-web/actix_web/trait.Responder.html#tymethod.respond_to)
is generic over `S`
* Use the `Query` extractor instead of `HttpRequest::query()`.
```rust
fn index(q: Query<HashMap<String, String>>) -> Result<..> {
...
}
```
or
```rust
let q = Query::<HashMap<String, String>>::extract(req);
```
* WebSocket operations are implemented as the `WsWriter` trait; you need to `use actix_web::ws::WsWriter`.
## 0.5
* `HttpResponseBuilder::body()`, `.finish()`, `.json()`
methods return `HttpResponse` instead of `Result<HttpResponse>`
* `actix_web::Method`, `actix_web::StatusCode`, `actix_web::Version`
moved to `actix_web::http` module
* `actix_web::header` moved to `actix_web::http::header`
* `NormalizePath` moved to `actix_web::http` module
* `HttpServer` moved to `actix_web::server`, added new `actix_web::server::new()` function,
shortcut for `actix_web::server::HttpServer::new()`
* `DefaultHeaders` middleware does not use separate builder, all builder methods moved to type itself
* `StaticFiles::new()`'s show_index parameter removed, use `show_files_listing()` method instead.
* `CookieSessionBackendBuilder` removed, all methods moved to `CookieSessionBackend` type
* `actix_web::httpcodes` module is deprecated, `HttpResponse::Ok()`, `HttpResponse::Found()` and other `HttpResponse::XXX()`
functions should be used instead
* `ClientRequestBuilder::body()` returns `Result<_, actix_web::Error>`
instead of `Result<_, http::Error>`
* `Application` renamed to `App`
* `actix_web::Reply`, `actix_web::Resource` moved to `actix_web::dev`

111
README.md

@ -1,111 +0,0 @@
<div align="center">
<h1>Actix web</h1>
<p>
<strong>Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
</p>
<p>
[![crates.io](https://img.shields.io/crates/v/actix-web?label=latest)](https://crates.io/crates/actix-web)
[![Documentation](https://docs.rs/actix-web/badge.svg?version=3.3.2)](https://docs.rs/actix-web/3.3.2)
[![Version](https://img.shields.io/badge/rustc-1.42+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.42.html)
![License](https://img.shields.io/crates/l/actix-web.svg)
[![Dependency Status](https://deps.rs/crate/actix-web/3.3.2/status.svg)](https://deps.rs/crate/actix-web/3.3.2)
<br />
[![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web)
[![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web)
[![Download](https://img.shields.io/crates/d/actix-web.svg)](https://crates.io/crates/actix-web)
[![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
</p>
</div>
## Features
* Supports *HTTP/1.x* and *HTTP/2*
* Streaming and pipelining
* Keep-alive and slow requests handling
* Client/server [WebSockets](https://actix.rs/docs/websockets/) support
* Transparent content compression/decompression (br, gzip, deflate)
* Powerful [request routing](https://actix.rs/docs/url-dispatch/)
* Multipart streams
* Static assets
* SSL support using OpenSSL or Rustls
* Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
* Includes an async [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
* Supports [Actix actor framework](https://github.com/actix/actix)
* Runs on stable Rust 1.42+
## Documentation
* [Website & User Guide](https://actix.rs)
* [Examples Repository](https://github.com/actix/examples)
* [API Documentation](https://docs.rs/actix-web)
* [API Documentation (master branch)](https://actix.rs/actix-web/actix_web)
## Example
Dependencies:
```toml
[dependencies]
actix-web = "3"
```
Code:
```rust
use actix_web::{get, web, App, HttpServer, Responder};
#[get("/{id}/{name}/index.html")]
async fn index(web::Path((id, name)): web::Path<(u32, String)>) -> impl Responder {
format!("Hello {}! id:{}", name, id)
}
#[actix_web::main]
async fn main() -> std::io::Result<()> {
HttpServer::new(|| App::new().service(index))
.bind("127.0.0.1:8080")?
.run()
.await
}
```
### More examples
* [Basic Setup](https://github.com/actix/examples/tree/master/basics/)
* [Application State](https://github.com/actix/examples/tree/master/state/)
* [JSON Handling](https://github.com/actix/examples/tree/master/json/)
* [Multipart Streams](https://github.com/actix/examples/tree/master/multipart/)
* [Diesel Integration](https://github.com/actix/examples/tree/master/diesel/)
* [r2d2 Integration](https://github.com/actix/examples/tree/master/r2d2/)
* [Simple WebSocket](https://github.com/actix/examples/tree/master/websocket/)
* [Tera Templates](https://github.com/actix/examples/tree/master/template_tera/)
* [Askama Templates](https://github.com/actix/examples/tree/master/template_askama/)
* [HTTPS using Rustls](https://github.com/actix/examples/tree/master/rustls/)
* [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/openssl/)
* [WebSocket Chat](https://github.com/actix/examples/tree/master/websocket-chat/)
You may consider checking out
[this directory](https://github.com/actix/examples/tree/master/) for more examples.
## Benchmarks
One of the fastest web frameworks available according to the
[TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r19).
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
[http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option.
## Code of Conduct
Contribution to the actix-web crate is organized under the terms of the Contributor Covenant. The maintainers of Actix Web promise to intervene to uphold that code of conduct.

1
README.md Symbolic link

@ -0,0 +1 @@
actix-web/README.md


@ -1,108 +1,211 @@
# Changes
## Unreleased - 2020-xx-xx
## Unreleased - 2021-xx-xx
## 0.6.0 - 2022-02-25
- No significant changes since `0.6.0-beta.16`.
## 0.6.0-beta.16 - 2022-01-31
- No significant changes since `0.6.0-beta.15`.
## 0.6.0-beta.15 - 2022-01-21
- No significant changes since `0.6.0-beta.14`.
## 0.6.0-beta.14 - 2022-01-14
- The `prefer_utf8` option introduced in `0.4.0` is now true by default. [#2583]
[#2583]: https://github.com/actix/actix-web/pull/2583
## 0.6.0-beta.13 - 2022-01-04
- The `Files` service now rejects requests with URL paths that include `%2F` (decoded: `/`). [#2398]
- The `Files` service now correctly decodes `%25` in the URL path to `%` for the file path. [#2398]
- Minimum supported Rust version (MSRV) is now 1.54.
[#2398]: https://github.com/actix/actix-web/pull/2398
## 0.6.0-beta.12 - 2021-12-29
- No significant changes since `0.6.0-beta.11`.
## 0.6.0-beta.11 - 2021-12-27
- No significant changes since `0.6.0-beta.10`.
## 0.6.0-beta.10 - 2021-12-11
- No significant changes since `0.6.0-beta.9`.
## 0.6.0-beta.9 - 2021-11-22
- Add crate feature `experimental-io-uring`, enabling async file I/O to be utilized. This feature is only available on Linux OSes with recent kernel versions. This feature is semver-exempt. [#2408]
- Add `NamedFile::open_async`. [#2408]
- Fix 304 Not Modified responses to omit the Content-Length header, as per the spec. [#2453]
- The `Responder` impl for `NamedFile` now has a boxed future associated type. [#2408]
- The `Service` impl for `NamedFileService` now has a boxed future associated type. [#2408]
- Add `impl Clone` for `FilesService`. [#2408]
[#2408]: https://github.com/actix/actix-web/pull/2408
[#2453]: https://github.com/actix/actix-web/pull/2453
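A minimal sketch of the `open_async` constructor added here, returning the file from a handler (the file path is illustrative):
```rust
use actix_files::NamedFile;
use actix_web::{get, Responder};

#[get("/")]
async fn index() -> impl Responder {
    // non-blocking open when the `experimental-io-uring` feature is enabled;
    // otherwise this behaves like the blocking `NamedFile::open`
    NamedFile::open_async("./static/index.html").await
}
```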
## 0.6.0-beta.8 - 2021-10-20
- Minimum supported Rust version (MSRV) is now 1.52.
## 0.6.0-beta.7 - 2021-09-09
- Minimum supported Rust version (MSRV) is now 1.51.
## 0.6.0-beta.6 - 2021-06-26
- Added `Files::path_filter()`. [#2274]
- `Files::show_files_listing()` can now be used with `Files::index_file()` to show files listing as a fallback when the index file is not found. [#2228]
[#2274]: https://github.com/actix/actix-web/pull/2274
[#2228]: https://github.com/actix/actix-web/pull/2228
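These two features compose; a sketch, assuming an illustrative `./static` directory and a filter that only exposes `.html` entries:
```rust
use actix_files::Files;

// serve `index.html` when present, fall back to a generated listing otherwise,
// and hide anything that does not end in `.html`
Files::new("/", "./static")
    .index_file("index.html")
    .show_files_listing()
    .path_filter(|path, _head| path.extension().map_or(false, |ext| ext == "html"));
```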
## 0.6.0-beta.5 - 2021-06-17
- `NamedFile` now implements `ServiceFactory` and `HttpServiceFactory` making it much more useful in routing. For example, it can be used directly as a default service. [#2135]
- For symbolic links, `Content-Disposition` header no longer shows the filename of the original file. [#2156]
- `Files::redirect_to_slash_directory()` now works as expected when used with `Files::show_files_listing()`. [#2225]
- `application/{javascript, json, wasm}` mime type now have `inline` disposition by default. [#2257]
[#2135]: https://github.com/actix/actix-web/pull/2135
[#2156]: https://github.com/actix/actix-web/pull/2156
[#2225]: https://github.com/actix/actix-web/pull/2225
[#2257]: https://github.com/actix/actix-web/pull/2257
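Because `NamedFile` now implements the service traits, it can be plugged straight into `Files::default_handler` as a fallback; a sketch with illustrative paths:
```rust
use actix_files::{Files, NamedFile};
use actix_web::App;

fn build() -> std::io::Result<()> {
    // the named file acts as the default service for anything `Files` cannot resolve
    let not_found = NamedFile::open("./static/404.html")?;
    App::new().service(Files::new("/", "./static").default_handler(not_found));
    Ok(())
}
```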
## 0.6.0-beta.4 - 2021-04-02
- Add support for `.guard` in `Files` to selectively filter `Files` services. [#2046]
[#2046]: https://github.com/actix/actix-web/pull/2046
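A sketch of attaching a routing guard, echoing the doc example added further down in this diff (the host name is illustrative):
```rust
use actix_files::Files;
use actix_web::{guard::Header, App};

// only route to this files service when the Host header matches
App::new().service(Files::new("/", "./static").guard(Header("Host", "example.com")));
```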
## 0.6.0-beta.3 - 2021-03-09
- No notable changes.
## 0.6.0-beta.2 - 2021-02-10
- Fix If-Modified-Since and If-Unmodified-Since to not compare using sub-second timestamps. [#1887]
- Replace `v_htmlescape` with `askama_escape`. [#1953]
[#1887]: https://github.com/actix/actix-web/pull/1887
[#1953]: https://github.com/actix/actix-web/pull/1953
## 0.6.0-beta.1 - 2021-01-07
- `HttpRange::parse` now has its own error type.
- Update `bytes` to `1.0`. [#1813]
[#1813]: https://github.com/actix/actix-web/pull/1813
## 0.5.0 - 2020-12-26
- Optionally support hidden files/directories. [#1811]
[#1811]: https://github.com/actix/actix-web/pull/1811
## 0.4.1 - 2020-11-24
- Clarify order of parameters in `Files::new` and improve docs.
## 0.4.0 - 2020-10-06
- Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714]
[#1714]: https://github.com/actix/actix-web/pull/1714
## 0.3.0 - 2020-09-11
- No significant changes from 0.3.0-beta.1.
## 0.3.0-beta.1 - 2020-07-15
- Update `v_htmlescape` to 0.10
- Update `actix-web` and `actix-http` dependencies to beta.1
## 0.3.0-alpha.1 - 2020-05-23
- Update `actix-web` and `actix-http` dependencies to alpha
- Fix some typos in the docs
- Bump minimum supported Rust version to 1.40
- Support sending Content-Length when Content-Range is specified [#1384]
[#1384]: https://github.com/actix/actix-web/pull/1384
## 0.2.1 - 2019-12-22
- Use the same format for file URLs regardless of platforms
## 0.2.0 - 2019-12-20
- Fix BodyEncoding trait import #1220
## 0.2.0-alpha.1 - 2019-12-07
- Migrate to `std::future`
## 0.1.7 - 2019-11-06
- Add an additional `filename*` param in the `Content-Disposition` header of
  `actix_files::NamedFile` to be more compatible. (#1151)
## 0.1.6 - 2019-10-14
- Add option to redirect to a slash-ended path `Files` #1132
## 0.1.5 - 2019-10-08
- Bump up `mime_guess` crate version to 2.0.1
- Bump up `percent-encoding` crate version to 2.1
- Allow user defined request guards for `Files` #1113
## 0.1.4 - 2019-07-20
- Allow to disable `Content-Disposition` header #686
## 0.1.3 - 2019-06-28
- Do not set `Content-Length` header, let actix-http set it #930
## 0.1.2 - 2019-06-13
- Content-Length is 0 for NamedFile HEAD request #914
- Fix ring dependency from actix-web default features for #741
## 0.1.1 - 2019-06-01
- Static files are incorrectly served as both chunked and with length #812
## 0.1.0 - 2019-05-25
- NamedFile last-modified check always fails due to nano-seconds in file modified date #820
## 0.1.0-beta.4 - 2019-05-12
- Update actix-web to beta.4
## 0.1.0-beta.1 - 2019-04-20
- Update actix-web to beta.1
## 0.1.0-alpha.6 - 2019-04-14
- Update actix-web to alpha6
## 0.1.0-alpha.4 - 2019-04-08
- Update actix-web to alpha4
## 0.1.0-alpha.2 - 2019-04-02
- Add default handler support
## 0.1.0-alpha.1 - 2019-03-28
- Initial impl


@ -1,13 +1,15 @@
[package]
name = "actix-files"
version = "0.5.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
version = "0.6.0"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"fakeshadow <24548779@qq.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Static file serving for Actix Web"
readme = "README.md"
keywords = ["actix", "http", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-files/"
repository = "https://github.com/actix/actix-web"
categories = ["asynchronous", "web-programming::http-server"]
license = "MIT OR Apache-2.0"
edition = "2018"
@ -16,20 +18,31 @@ edition = "2018"
name = "actix_files"
path = "src/lib.rs"
[features]
experimental-io-uring = ["actix-web/experimental-io-uring", "tokio-uring"]
[dependencies]
actix-web = { version = "3.0.0", default-features = false }
actix-service = "1.0.6"
actix-http = "3"
actix-service = "2"
actix-utils = "3"
actix-web = { version = "4", default-features = false }
askama_escape = "0.10"
bitflags = "1"
bytes = "0.5.3"
futures-core = { version = "0.3.7", default-features = false }
futures-util = { version = "0.3.7", default-features = false }
bytes = "1"
derive_more = "0.99.5"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
http-range = "0.1.4"
log = "0.4"
mime = "0.3"
mime_guess = "2.0.1"
percent-encoding = "2.1"
v_htmlescape = "0.12"
pin-project-lite = "0.2.7"
tokio-uring = { version = "0.2", optional = true, features = ["bytes"] }
[dev-dependencies]
actix-rt = "1.0.0"
actix-web = "3.0.0"
actix-rt = "2.2"
actix-test = "0.1.0-beta.13"
actix-web = "4.0.0"
tempfile = "3.2"


@ -3,17 +3,16 @@
> Static file serving for Actix Web
[![crates.io](https://img.shields.io/crates/v/actix-files?label=latest)](https://crates.io/crates/actix-files)
[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.5.0)](https://docs.rs/actix-files/0.5.0)
[![Version](https://img.shields.io/badge/rustc-1.42+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.42.html)
[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.6.0)](https://docs.rs/actix-files/0.6.0)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
![License](https://img.shields.io/crates/l/actix-files.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-files/0.5.0/status.svg)](https://deps.rs/crate/actix-files/0.5.0)
[![dependency status](https://deps.rs/crate/actix-files/0.6.0/status.svg)](https://deps.rs/crate/actix-files/0.6.0)
[![Download](https://img.shields.io/crates/d/actix-files.svg)](https://crates.io/crates/actix-files)
[![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
## Documentation & Resources
- [API Documentation](https://docs.rs/actix-files/)
- [Example Project](https://github.com/actix/examples/tree/master/static_index)
- [Chat on Gitter](https://gitter.im/actix/actix-web)
- Minimum supported Rust version: 1.42 or later
- [API Documentation](https://docs.rs/actix-files)
- [Example Project](https://github.com/actix/examples/tree/master/basics/static-files)
- Minimum Supported Rust Version (MSRV): 1.54


@ -1,94 +1,218 @@
use std::{
cmp, fmt,
fs::File,
future::Future,
io::{self, Read, Seek},
io,
pin::Pin,
task::{Context, Poll},
};
use actix_web::{
error::{BlockingError, Error},
web,
};
use bytes::Bytes;
use actix_web::{error::Error, web::Bytes};
use futures_core::{ready, Stream};
use futures_util::future::{FutureExt, LocalBoxFuture};
use pin_project_lite::pin_project;
use crate::handle_error;
#[cfg(feature = "experimental-io-uring")]
use bytes::BytesMut;
type ChunkedBoxFuture =
LocalBoxFuture<'static, Result<(File, Bytes), BlockingError<io::Error>>>;
use super::named::File;
#[doc(hidden)]
/// A helper created from a `std::fs::File` which reads the file
/// chunk-by-chunk on a `ThreadPool`.
pub struct ChunkedReadFile {
pub(crate) size: u64,
pub(crate) offset: u64,
pub(crate) file: Option<File>,
pub(crate) fut: Option<ChunkedBoxFuture>,
pub(crate) counter: u64,
pin_project! {
/// Adapter to read a `std::fs::File` in chunks.
#[doc(hidden)]
pub struct ChunkedReadFile<F, Fut> {
size: u64,
offset: u64,
#[pin]
state: ChunkedReadFileState<Fut>,
counter: u64,
callback: F,
}
}
impl fmt::Debug for ChunkedReadFile {
#[cfg(not(feature = "experimental-io-uring"))]
pin_project! {
#[project = ChunkedReadFileStateProj]
#[project_replace = ChunkedReadFileStateProjReplace]
enum ChunkedReadFileState<Fut> {
File { file: Option<File>, },
Future { #[pin] fut: Fut },
}
}
#[cfg(feature = "experimental-io-uring")]
pin_project! {
#[project = ChunkedReadFileStateProj]
#[project_replace = ChunkedReadFileStateProjReplace]
enum ChunkedReadFileState<Fut> {
File { file: Option<(File, BytesMut)> },
Future { #[pin] fut: Fut },
}
}
impl<F, Fut> fmt::Debug for ChunkedReadFile<F, Fut> {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("ChunkedReadFile")
}
}
impl Stream for ChunkedReadFile {
pub(crate) fn new_chunked_read(
size: u64,
offset: u64,
file: File,
) -> impl Stream<Item = Result<Bytes, Error>> {
ChunkedReadFile {
size,
offset,
#[cfg(not(feature = "experimental-io-uring"))]
state: ChunkedReadFileState::File { file: Some(file) },
#[cfg(feature = "experimental-io-uring")]
state: ChunkedReadFileState::File {
file: Some((file, BytesMut::new())),
},
counter: 0,
callback: chunked_read_file_callback,
}
}
#[cfg(not(feature = "experimental-io-uring"))]
async fn chunked_read_file_callback(
mut file: File,
offset: u64,
max_bytes: usize,
) -> Result<(File, Bytes), Error> {
use io::{Read as _, Seek as _};
let res = actix_web::web::block(move || {
let mut buf = Vec::with_capacity(max_bytes);
file.seek(io::SeekFrom::Start(offset))?;
let n_bytes = file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?;
if n_bytes == 0 {
Err(io::Error::from(io::ErrorKind::UnexpectedEof))
} else {
Ok((file, Bytes::from(buf)))
}
})
.await??;
Ok(res)
}
#[cfg(feature = "experimental-io-uring")]
async fn chunked_read_file_callback(
file: File,
offset: u64,
max_bytes: usize,
mut bytes_mut: BytesMut,
) -> io::Result<(File, Bytes, BytesMut)> {
bytes_mut.reserve(max_bytes);
let (res, mut bytes_mut) = file.read_at(bytes_mut, offset).await;
let n_bytes = res?;
if n_bytes == 0 {
return Err(io::ErrorKind::UnexpectedEof.into());
}
let bytes = bytes_mut.split_to(n_bytes).freeze();
Ok((file, bytes, bytes_mut))
}
#[cfg(feature = "experimental-io-uring")]
impl<F, Fut> Stream for ChunkedReadFile<F, Fut>
where
F: Fn(File, u64, usize, BytesMut) -> Fut,
Fut: Future<Output = io::Result<(File, Bytes, BytesMut)>>,
{
type Item = Result<Bytes, Error>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
if let Some(ref mut fut) = self.fut {
return match ready!(Pin::new(fut).poll(cx)) {
Ok((file, bytes)) => {
self.fut.take();
self.file = Some(file);
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
ChunkedReadFileStateProj::File { file } => {
let size = *this.size;
let offset = *this.offset;
let counter = *this.counter;
self.offset += bytes.len() as u64;
self.counter += bytes.len() as u64;
if size == counter {
Poll::Ready(None)
} else {
let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize;
Poll::Ready(Some(Ok(bytes)))
let (file, bytes_mut) = file
.take()
.expect("ChunkedReadFile polled after completion");
let fut = (this.callback)(file, offset, max_bytes, bytes_mut);
this.state
.project_replace(ChunkedReadFileState::Future { fut });
self.poll_next(cx)
}
Err(e) => Poll::Ready(Some(Err(handle_error(e)))),
};
}
}
ChunkedReadFileStateProj::Future { fut } => {
let (file, bytes, bytes_mut) = ready!(fut.poll(cx))?;
let size = self.size;
let offset = self.offset;
let counter = self.counter;
this.state.project_replace(ChunkedReadFileState::File {
file: Some((file, bytes_mut)),
});
if size == counter {
Poll::Ready(None)
} else {
let mut file = self.file.take().expect("Use after completion");
*this.offset += bytes.len() as u64;
*this.counter += bytes.len() as u64;
self.fut = Some(
web::block(move || {
let max_bytes =
cmp::min(size.saturating_sub(counter), 65_536) as usize;
let mut buf = Vec::with_capacity(max_bytes);
file.seek(io::SeekFrom::Start(offset))?;
let n_bytes =
file.by_ref().take(max_bytes as u64).read_to_end(&mut buf)?;
if n_bytes == 0 {
return Err(io::ErrorKind::UnexpectedEof.into());
}
Ok((file, Bytes::from(buf)))
})
.boxed_local(),
);
self.poll_next(cx)
Poll::Ready(Some(Ok(bytes)))
}
}
}
}
#[cfg(not(feature = "experimental-io-uring"))]
impl<F, Fut> Stream for ChunkedReadFile<F, Fut>
where
F: Fn(File, u64, usize) -> Fut,
Fut: Future<Output = Result<(File, Bytes), Error>>,
{
type Item = Result<Bytes, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.as_mut().project();
match this.state.as_mut().project() {
ChunkedReadFileStateProj::File { file } => {
let size = *this.size;
let offset = *this.offset;
let counter = *this.counter;
if size == counter {
Poll::Ready(None)
} else {
let max_bytes = cmp::min(size.saturating_sub(counter), 65_536) as usize;
let file = file
.take()
.expect("ChunkedReadFile polled after completion");
let fut = (this.callback)(file, offset, max_bytes);
this.state
.project_replace(ChunkedReadFileState::Future { fut });
self.poll_next(cx)
}
}
ChunkedReadFileStateProj::Future { fut } => {
let (file, bytes) = ready!(fut.poll(cx))?;
this.state
.project_replace(ChunkedReadFileState::File { file: Some(file) });
*this.offset += bytes.len() as u64;
*this.counter += bytes.len() as u64;
Poll::Ready(Some(Ok(bytes)))
}
}
}
}


@ -1,8 +1,8 @@
use std::{fmt::Write, fs::DirEntry, io, path::Path, path::PathBuf};
use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse};
use askama_escape::{escape as escape_html_entity, Html};
use percent_encoding::{utf8_percent_encode, CONTROLS};
use v_htmlescape::escape as escape_html_entity;
/// A directory; responds with the generated directory listing.
#[derive(Debug)]
@ -40,17 +40,26 @@ impl Directory {
pub(crate) type DirectoryRenderer =
dyn Fn(&Directory, &HttpRequest) -> Result<ServiceResponse, io::Error>;
// show file url as relative to static path
/// Returns percent encoded file URL path.
macro_rules! encode_file_url {
($path:ident) => {
utf8_percent_encode(&$path, CONTROLS)
};
}
// " -- &quot; & -- &amp; ' -- &#x27; < -- &lt; > -- &gt; / -- &#x2f;
/// Returns HTML entity encoded formatter.
///
/// ```plain
/// " => &quot;
/// & => &amp;
/// ' => &#x27;
/// < => &lt;
/// > => &gt;
/// / => &#x2f;
/// ```
macro_rules! encode_file_name {
($entry:ident) => {
escape_html_entity(&$entry.file_name().to_string_lossy())
escape_html_entity(&$entry.file_name().to_string_lossy(), Html)
};
}
@ -66,9 +75,7 @@ pub(crate) fn directory_listing(
if dir.is_visible(&entry) {
let entry = entry.unwrap();
let p = match entry.path().strip_prefix(&dir.path) {
Ok(p) if cfg!(windows) => {
base.join(p).to_string_lossy().replace("\\", "/")
}
Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace('\\', "/"),
Ok(p) => base.join(p).to_string_lossy().into_owned(),
Err(_) => continue,
};


@ -1,4 +1,4 @@
use actix_web::{http::StatusCode, HttpResponse, ResponseError};
use actix_web::{http::StatusCode, ResponseError};
use derive_more::Display;
/// Errors which can occur when serving static files.
@ -16,22 +16,30 @@ pub enum FilesError {
/// Return `NotFound` for `FilesError`
impl ResponseError for FilesError {
fn error_response(&self) -> HttpResponse {
HttpResponse::new(StatusCode::NOT_FOUND)
fn status_code(&self) -> StatusCode {
StatusCode::NOT_FOUND
}
}
#[allow(clippy::enum_variant_names)]
#[derive(Display, Debug, PartialEq)]
#[non_exhaustive]
pub enum UriSegmentError {
/// The segment started with the wrapped invalid character.
#[display(fmt = "The segment started with the wrapped invalid character")]
BadStart(char),
/// The segment contained the wrapped invalid character.
#[display(fmt = "The segment contained the wrapped invalid character")]
BadChar(char),
/// The segment ended with the wrapped invalid character.
#[display(fmt = "The segment ended with the wrapped invalid character")]
BadEnd(char),
/// The path is not a valid UTF-8 string after doing percent decoding.
#[display(fmt = "The path is not a valid UTF-8 string after percent-decoding")]
NotValidUtf8,
}
/// Return `BadRequest` for `UriSegmentError`


@ -1,27 +1,35 @@
use std::{cell::RefCell, fmt, io, path::PathBuf, rc::Rc};
use std::{
cell::RefCell,
fmt, io,
path::{Path, PathBuf},
rc::Rc,
};
use actix_service::{boxed, IntoServiceFactory, ServiceFactory};
use actix_service::{boxed, IntoServiceFactory, ServiceFactory, ServiceFactoryExt};
use actix_web::{
dev::{
AppService, HttpServiceFactory, ResourceDef, ServiceRequest, ServiceResponse,
AppService, HttpServiceFactory, RequestHead, ResourceDef, ServiceRequest,
ServiceResponse,
},
error::Error,
guard::Guard,
http::header::DispositionType,
HttpRequest,
};
use futures_util::future::{ok, FutureExt, LocalBoxFuture};
use futures_core::future::LocalBoxFuture;
use crate::{
directory_listing, named, Directory, DirectoryRenderer, FilesService,
HttpNewService, MimeOverride,
directory_listing, named,
service::{FilesService, FilesServiceInner},
Directory, DirectoryRenderer, HttpNewService, MimeOverride, PathFilter,
};
/// Static files handling service.
///
/// `Files` service must be registered with `App::service()` method.
///
/// ```rust
/// # Examples
/// ```
/// use actix_web::App;
/// use actix_files::Files;
///
@ -29,7 +37,7 @@ use crate::{
/// .service(Files::new("/static", "."));
/// ```
pub struct Files {
path: String,
mount_path: String,
directory: PathBuf,
index: Option<String>,
show_index: bool,
@ -37,8 +45,10 @@ pub struct Files {
default: Rc<RefCell<Option<Rc<HttpNewService>>>>,
renderer: Rc<DirectoryRenderer>,
mime_override: Option<Rc<MimeOverride>>,
path_filter: Option<Rc<PathFilter>>,
file_flags: named::Flags,
guards: Option<Rc<dyn Guard>>,
use_guards: Option<Rc<dyn Guard>>,
guards: Vec<Rc<dyn Guard>>,
hidden_files: bool,
}
@ -58,8 +68,10 @@ impl Clone for Files {
default: self.default.clone(),
renderer: self.renderer.clone(),
file_flags: self.file_flags,
path: self.path.clone(),
mount_path: self.mount_path.clone(),
mime_override: self.mime_override.clone(),
path_filter: self.path_filter.clone(),
use_guards: self.use_guards.clone(),
guards: self.guards.clone(),
hidden_files: self.hidden_files,
}
@ -81,9 +93,9 @@ impl Files {
/// If the mount path is set as the root path `/`, services registered after this one will
/// be inaccessible. Register more specific handlers and services first.
///
/// `Files` uses a threadpool for blocking filesystem operations. By default, the pool uses a
/// number of threads equal to 5x the number of available logical CPUs. Pool size can be changed
/// by setting ACTIX_THREADPOOL environment variable.
/// `Files` utilizes the existing Tokio thread-pool for blocking filesystem operations.
/// The number of running threads is adjusted over time as needed, up to a maximum of 512 times
/// the number of server [workers](actix_web::HttpServer::workers), by default.
pub fn new<T: Into<PathBuf>>(mount_path: &str, serve_from: T) -> Files {
let orig_dir = serve_from.into();
let dir = match orig_dir.canonicalize() {
@ -95,7 +107,7 @@ impl Files {
};
Files {
path: mount_path.to_owned(),
mount_path: mount_path.trim_end_matches('/').to_owned(),
directory: dir,
index: None,
show_index: false,
@ -103,8 +115,10 @@ impl Files {
default: Rc::new(RefCell::new(None)),
renderer: Rc::new(directory_listing),
mime_override: None,
path_filter: None,
file_flags: named::Flags::default(),
guards: None,
use_guards: None,
guards: Vec::new(),
hidden_files: false,
}
}
@ -112,6 +126,9 @@ impl Files {
/// Show files listing for directories.
///
/// By default, showing the files listing is disabled.
///
/// When used with [`Files::index_file()`], files listing is shown as a fallback
/// when the index file is not found.
pub fn show_files_listing(mut self) -> Self {
self.show_index = true;
self
@ -128,8 +145,8 @@ impl Files {
/// Set custom directory renderer
pub fn files_listing_renderer<F>(mut self, f: F) -> Self
where
for<'r, 's> F: Fn(&'r Directory, &'s HttpRequest) -> Result<ServiceResponse, io::Error>
+ 'static,
for<'r, 's> F:
Fn(&'r Directory, &'s HttpRequest) -> Result<ServiceResponse, io::Error> + 'static,
{
self.renderer = Rc::new(f);
self
@ -144,10 +161,45 @@ impl Files {
self
}
/// Sets path filtering closure.
///
/// The path provided to the closure is relative to `serve_from` path.
/// You can safely join this path with the `serve_from` path to get the real path.
/// However, the real path may not exist since the filter is called before checking path existence.
///
/// When a path doesn't pass the filter, [`Files::default_handler`] is called if set; otherwise,
/// `404 Not Found` is returned.
///
/// # Examples
/// ```
/// use std::path::Path;
/// use actix_files::Files;
///
/// // prevent searching subdirectories and following symlinks
/// let files_service = Files::new("/", "./static").path_filter(|path, _| {
/// path.components().count() == 1
/// && Path::new("./static")
/// .join(path)
/// .symlink_metadata()
/// .map(|m| !m.file_type().is_symlink())
/// .unwrap_or(false)
/// });
/// ```
pub fn path_filter<F>(mut self, f: F) -> Self
where
F: Fn(&Path, &RequestHead) -> bool + 'static,
{
self.path_filter = Some(Rc::new(f));
self
}
/// Set index file
///
/// Shows specific index file for directory "/" instead of
/// Shows specific index file for directories instead of
/// showing files listing.
///
/// If the index file is not found, files listing is shown as a fallback if
/// [`Files::show_files_listing()`] is set.
pub fn index_file<T: Into<String>>(mut self, index: T) -> Self {
self.index = Some(index.into());
self
@ -156,7 +208,6 @@ impl Files {
/// Specifies whether to use ETag or not.
///
/// Default is true.
#[inline]
pub fn use_etag(mut self, value: bool) -> Self {
self.file_flags.set(named::Flags::ETAG, value);
self
@ -165,7 +216,6 @@ impl Files {
/// Specifies whether to use Last-Modified or not.
///
/// Default is true.
#[inline]
pub fn use_last_modified(mut self, value: bool) -> Self {
self.file_flags.set(named::Flags::LAST_MD, value);
self
@ -174,37 +224,86 @@ impl Files {
/// Specifies whether text responses should signal a UTF-8 encoding.
///
/// Default is false (but will default to true in a future version).
#[inline]
pub fn prefer_utf8(mut self, value: bool) -> Self {
self.file_flags.set(named::Flags::PREFER_UTF8, value);
self
}
/// Specifies custom guards to use for directory listings and files.
/// Adds a routing guard.
///
/// Default behaviour allows GET and HEAD.
#[inline]
pub fn use_guards<G: Guard + 'static>(mut self, guards: G) -> Self {
self.guards = Some(Rc::new(guards));
/// Use this to allow multiple chained file services that respond to strictly different
/// properties of a request. Due to the way routing works, if a guard check returns true and the
/// request starts being handled by the file service, it will not be able to back out and try
/// the next service; you will simply get a 404 (or 405) error response.
///
/// To allow `POST` requests to retrieve files, see [`Files::use_guards`].
///
/// # Examples
/// ```
/// use actix_web::{guard::Header, App};
/// use actix_files::Files;
///
/// App::new().service(
/// Files::new("/","/my/site/files")
/// .guard(Header("Host", "example.com"))
/// );
/// ```
pub fn guard<G: Guard + 'static>(mut self, guard: G) -> Self {
self.guards.push(Rc::new(guard));
self
}
/// Specifies a guard to check before fetching directory listings or files.
///
/// Note that this guard has no effect on routing; its main use is to guard on the request's
/// method just before serving the file, only allowing `GET` and `HEAD` requests by default.
/// See [`Files::guard`] for routing guards.
pub fn method_guard<G: Guard + 'static>(mut self, guard: G) -> Self {
self.use_guards = Some(Rc::new(guard));
self
}
/// See [`Files::method_guard`].
#[doc(hidden)]
#[deprecated(since = "0.6.0", note = "Renamed to `method_guard`.")]
pub fn use_guards<G: Guard + 'static>(self, guard: G) -> Self {
self.method_guard(guard)
}
/// Disable `Content-Disposition` header.
///
/// By default, the `Content-Disposition` header is enabled.
#[inline]
pub fn disable_content_disposition(mut self) -> Self {
self.file_flags.remove(named::Flags::CONTENT_DISPOSITION);
self
}
/// Sets the default handler which is used when no matching file can be found.
///
/// # Examples
/// Setting a fallback static file handler:
/// ```
/// use actix_files::{Files, NamedFile};
/// use actix_web::dev::{ServiceRequest, ServiceResponse, fn_service};
///
/// # fn run() -> Result<(), actix_web::Error> {
/// let files = Files::new("/", "./static")
/// .index_file("index.html")
/// .default_handler(fn_service(|req: ServiceRequest| async {
/// let (req, _) = req.into_parts();
/// let file = NamedFile::open_async("./static/404.html").await?;
/// let res = file.into_response(&req);
/// Ok(ServiceResponse::new(req, res))
/// }));
/// # Ok(())
/// # }
/// ```
pub fn default_handler<F, U>(mut self, f: F) -> Self
where
F: IntoServiceFactory<U>,
F: IntoServiceFactory<U, ServiceRequest>,
U: ServiceFactory<
ServiceRequest,
Config = (),
Request = ServiceRequest,
Response = ServiceResponse,
Error = Error,
> + 'static,
@ -218,7 +317,6 @@ impl Files {
}
/// Enables serving hidden files and directories, allowing a leading dot in URL path segments.
#[inline]
pub fn use_hidden_files(mut self) -> Self {
self.hidden_files = true;
self
@ -226,23 +324,34 @@ impl Files {
}
impl HttpServiceFactory for Files {
fn register(self, config: &mut AppService) {
fn register(mut self, config: &mut AppService) {
let guards = if self.guards.is_empty() {
None
} else {
let guards = std::mem::take(&mut self.guards);
Some(
guards
.into_iter()
.map(|guard| -> Box<dyn Guard> { Box::new(guard) })
.collect::<Vec<_>>(),
)
};
if self.default.borrow().is_none() {
*self.default.borrow_mut() = Some(config.default_service());
}
let rdef = if config.is_root() {
ResourceDef::root_prefix(&self.path)
ResourceDef::root_prefix(&self.mount_path)
} else {
ResourceDef::prefix(&self.path)
ResourceDef::prefix(&self.mount_path)
};
config.register_service(rdef, None, self, None)
config.register_service(rdef, guards, self, None)
}
}
impl ServiceFactory for Files {
type Request = ServiceRequest;
impl ServiceFactory<ServiceRequest> for Files {
type Response = ServiceResponse;
type Error = Error;
type Config = ();
@ -251,7 +360,7 @@ impl ServiceFactory for Files {
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
let mut srv = FilesService {
let mut inner = FilesServiceInner {
directory: self.directory.clone(),
index: self.index.clone(),
show_index: self.show_index,
@ -259,24 +368,25 @@ impl ServiceFactory for Files {
default: None,
renderer: self.renderer.clone(),
mime_override: self.mime_override.clone(),
path_filter: self.path_filter.clone(),
file_flags: self.file_flags,
guards: self.guards.clone(),
guards: self.use_guards.clone(),
hidden_files: self.hidden_files,
};
if let Some(ref default) = *self.default.borrow() {
default
.new_service(())
.map(move |result| match result {
let fut = default.new_service(());
Box::pin(async {
match fut.await {
Ok(default) => {
srv.default = Some(default);
Ok(srv)
inner.default = Some(default);
Ok(FilesService(Rc::new(inner)))
}
Err(_) => Err(()),
})
.boxed_local()
}
})
} else {
ok(srv).boxed_local()
Box::pin(async move { Ok(FilesService(Rc::new(inner))) })
}
}
}

File diff suppressed because it is too large

@ -1,28 +1,30 @@
use std::fs::{File, Metadata};
use std::io;
use std::ops::{Deref, DerefMut};
use std::path::{Path, PathBuf};
use std::time::{SystemTime, UNIX_EPOCH};
#[cfg(unix)]
use std::os::unix::fs::MetadataExt;
use std::{
fs::Metadata,
io,
path::{Path, PathBuf},
time::{SystemTime, UNIX_EPOCH},
};
use actix_web::{
dev::{BodyEncoding, SizedStream},
body::{self, BoxBody, SizedStream},
dev::{
self, AppService, HttpServiceFactory, ResourceDef, Service, ServiceFactory,
ServiceRequest, ServiceResponse,
},
http::{
header::{
self, Charset, ContentDisposition, DispositionParam, DispositionType,
ExtendedValue,
self, Charset, ContentDisposition, ContentEncoding, DispositionParam,
DispositionType, ExtendedValue, HeaderValue,
},
ContentEncoding, StatusCode,
StatusCode,
},
Error, HttpMessage, HttpRequest, HttpResponse, Responder,
};
use bitflags::bitflags;
use futures_util::future::{ready, Ready};
use derive_more::{Deref, DerefMut};
use futures_core::future::LocalBoxFuture;
use mime_guess::from_path;
use crate::ChunkedReadFile;
use crate::{encoding::equiv_utf8_text, range::HttpRange};
bitflags! {
@ -36,15 +38,40 @@ bitflags! {
impl Default for Flags {
fn default() -> Self {
Flags::from_bits_truncate(0b0000_0111)
Flags::from_bits_truncate(0b0000_1111)
}
}
/// A file with an associated name.
#[derive(Debug)]
///
/// `NamedFile` can be registered as services:
/// ```
/// use actix_web::App;
/// use actix_files::NamedFile;
///
/// # async fn run() -> Result<(), Box<dyn std::error::Error>> {
/// let file = NamedFile::open_async("./static/index.html").await?;
/// let app = App::new().service(file);
/// # Ok(())
/// # }
/// ```
///
/// They can also be returned from handlers:
/// ```
/// use actix_web::{Responder, get};
/// use actix_files::NamedFile;
///
/// #[get("/")]
/// async fn index() -> impl Responder {
/// NamedFile::open_async("./static/index.html").await
/// }
/// ```
#[derive(Debug, Deref, DerefMut)]
pub struct NamedFile {
path: PathBuf,
#[deref]
#[deref_mut]
file: File,
path: PathBuf,
modified: Option<SystemTime>,
pub(crate) md: Metadata,
pub(crate) flags: Flags,
@ -54,6 +81,13 @@ pub struct NamedFile {
pub(crate) encoding: Option<ContentEncoding>,
}
#[cfg(not(feature = "experimental-io-uring"))]
pub(crate) use std::fs::File;
#[cfg(feature = "experimental-io-uring")]
pub(crate) use tokio_uring::fs::File;
use super::chunked;
impl NamedFile {
/// Creates an instance from a previously opened file.
///
@ -61,20 +95,19 @@ impl NamedFile {
/// `ContentDisposition` headers.
///
/// # Examples
///
/// ```rust
/// ```ignore
/// use std::{
/// io::{self, Write as _},
/// env,
/// fs::File
/// };
/// use actix_files::NamedFile;
/// use std::io::{self, Write};
/// use std::env;
/// use std::fs::File;
///
/// fn main() -> io::Result<()> {
/// let mut file = File::create("foo.txt")?;
/// file.write_all(b"Hello, world!")?;
/// let named_file = NamedFile::from_file(file, "bar.txt")?;
/// # std::fs::remove_file("foo.txt");
/// Ok(())
/// }
/// let mut file = File::create("foo.txt")?;
/// file.write_all(b"Hello, world!")?;
/// let named_file = NamedFile::from_file(file, "bar.txt")?;
/// # std::fs::remove_file("foo.txt");
/// Ok(())
/// ```
pub fn from_file<P: AsRef<Path>>(file: File, path: P) -> io::Result<NamedFile> {
let path = path.as_ref().to_path_buf();
@ -96,6 +129,11 @@ impl NamedFile {
let disposition = match ct.type_() {
mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline,
mime::APPLICATION => match ct.subtype() {
mime::JAVASCRIPT | mime::JSON => DispositionType::Inline,
name if name == "wasm" => DispositionType::Inline,
_ => DispositionType::Attachment,
},
_ => DispositionType::Attachment,
};
@ -118,7 +156,30 @@ impl NamedFile {
(ct, cd)
};
let md = file.metadata()?;
let md = {
#[cfg(not(feature = "experimental-io-uring"))]
{
file.metadata()?
}
#[cfg(feature = "experimental-io-uring")]
{
use std::os::unix::prelude::{AsRawFd, FromRawFd};
let fd = file.as_raw_fd();
// SAFETY: fd is borrowed and lives longer than the unsafe block
unsafe {
let file = std::fs::File::from_raw_fd(fd);
let md = file.metadata();
// SAFETY: forget the fd before exiting block in success or error case but don't
// run destructor (that would close file handle)
std::mem::forget(file);
md?
}
}
};
let modified = md.modified().ok();
let encoding = None;
@ -138,14 +199,43 @@ impl NamedFile {
/// Attempts to open a file in read-only mode.
///
/// # Examples
///
/// ```rust
/// ```
/// use actix_files::NamedFile;
///
/// let file = NamedFile::open("foo.txt");
/// ```
#[cfg(not(feature = "experimental-io-uring"))]
pub fn open<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
Self::from_file(File::open(&path)?, path)
let file = File::open(&path)?;
Self::from_file(file, path)
}
#[allow(rustdoc::broken_intra_doc_links)]
/// Attempts to open a file asynchronously in read-only mode.
///
/// When the `experimental-io-uring` crate feature is enabled, this will be async.
/// Otherwise, it will be just like [`open`][Self::open].
///
/// # Examples
/// ```
/// use actix_files::NamedFile;
/// # async fn open() {
/// let file = NamedFile::open_async("foo.txt").await.unwrap();
/// # }
/// ```
pub async fn open_async<P: AsRef<Path>>(path: P) -> io::Result<NamedFile> {
let file = {
#[cfg(not(feature = "experimental-io-uring"))]
{
File::open(&path)?
}
#[cfg(feature = "experimental-io-uring")]
{
File::open(&path).await?
}
};
Self::from_file(file, path)
}
/// Returns reference to the underlying `File` object.
@ -157,13 +247,12 @@ impl NamedFile {
/// Retrieve the path of this file.
///
/// # Examples
///
/// ```rust
/// ```
/// # use std::io;
/// use actix_files::NamedFile;
///
/// # fn path() -> io::Result<()> {
/// let file = NamedFile::open("test.txt")?;
/// # async fn path() -> io::Result<()> {
/// let file = NamedFile::open_async("test.txt").await?;
/// assert_eq!(file.path().as_os_str(), "foo.txt");
/// # Ok(())
/// # }
@ -179,21 +268,21 @@ impl NamedFile {
self
}
/// Set the MIME Content-Type for serving this file. By default
/// the Content-Type is inferred from the filename extension.
/// Set the MIME Content-Type for serving this file. By default the Content-Type is inferred
/// from the filename extension.
#[inline]
pub fn set_content_type(mut self, mime_type: mime::Mime) -> Self {
self.content_type = mime_type;
self
}
/// Set the Content-Disposition for serving this file. This allows
/// changing the inline/attachment disposition as well as the filename
/// sent to the peer. By default the disposition is `inline` for text,
/// image, and video content types, and `attachment` otherwise, and
/// the filename is taken from the path provided in the `open` method
/// after converting it to UTF-8 using.
/// [`std::ffi::OsStr::to_string_lossy`]
/// Set the Content-Disposition for serving this file. This allows changing the
/// `inline/attachment` disposition as well as the filename sent to the peer.
///
/// By default the disposition is `inline` for `text/*`, `image/*`, `video/*` and
/// `application/{javascript, json, wasm}` mime types, and `attachment` otherwise, and the
/// filename is taken from the path provided in the `open` method after converting it to UTF-8
/// (using `to_string_lossy`).
#[inline]
pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self {
self.content_disposition = cd;
@ -210,14 +299,18 @@ impl NamedFile {
self
}
/// Set content encoding for serving this file
/// Sets content encoding for this file.
///
/// This prevents the `Compress` middleware from modifying the file contents and signals to
/// browsers/clients how to decode it. For example, if serving a compressed HTML file (e.g.,
/// `index.html.gz`) then use `.set_content_encoding(ContentEncoding::Gzip)`.
#[inline]
pub fn set_content_encoding(mut self, enc: ContentEncoding) -> Self {
self.encoding = Some(enc);
self
}
/// Specifies whether to use ETag or not.
/// Specifies whether to return `ETag` header in response.
///
/// Default is true.
#[inline]
@ -226,7 +319,7 @@ impl NamedFile {
self
}
/// Specifies whether to use Last-Modified or not.
/// Specifies whether to return `Last-Modified` header in response.
///
/// Default is true.
#[inline]
@ -244,14 +337,18 @@ impl NamedFile {
self
}
/// Creates an `ETag` in a format similar to Apache's.
pub(crate) fn etag(&self) -> Option<header::EntityTag> {
// This etag format is similar to Apache's.
self.modified.as_ref().map(|mtime| {
let ino = {
#[cfg(unix)]
{
#[cfg(unix)]
use std::os::unix::fs::MetadataExt as _;
self.md.ino()
}
#[cfg(not(unix))]
{
0
@ -262,7 +359,7 @@ impl NamedFile {
.duration_since(UNIX_EPOCH)
.expect("modification time must be after epoch");
header::EntityTag::strong(format!(
header::EntityTag::new_strong(format!(
"{:x}:{:x}:{:x}:{:x}",
ino,
self.md.len(),
@ -277,37 +374,32 @@ impl NamedFile {
}
/// Creates an `HttpResponse` with file as a streaming body.
pub fn into_response(self, req: &HttpRequest) -> Result<HttpResponse, Error> {
pub fn into_response(self, req: &HttpRequest) -> HttpResponse<BoxBody> {
if self.status_code != StatusCode::OK {
let mut res = HttpResponse::build(self.status_code);
if self.flags.contains(Flags::PREFER_UTF8) {
let ct = equiv_utf8_text(self.content_type.clone());
res.header(header::CONTENT_TYPE, ct.to_string());
let ct = if self.flags.contains(Flags::PREFER_UTF8) {
equiv_utf8_text(self.content_type.clone())
} else {
res.header(header::CONTENT_TYPE, self.content_type.to_string());
}
self.content_type
};
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
res.header(
res.insert_header((
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
);
));
}
if let Some(current_encoding) = self.encoding {
res.encoding(current_encoding);
res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str()));
}
let reader = ChunkedReadFile {
size: self.md.len(),
offset: 0,
file: Some(self.file),
fut: None,
counter: 0,
};
let reader = chunked::new_chunked_read(self.md.len(), 0, self.file);
return Ok(res.streaming(reader));
return res.streaming(reader);
}
let etag = if self.flags.contains(Flags::ETAG) {
@ -328,11 +420,11 @@ impl NamedFile {
} else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) =
(last_modified, req.get_header())
{
let t1: SystemTime = m.clone().into();
let t2: SystemTime = since.clone().into();
let t1: SystemTime = (*m).into();
let t2: SystemTime = (*since).into();
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
(Ok(t1), Ok(t2)) => t1 > t2,
(Ok(t1), Ok(t2)) => t1.as_secs() > t2.as_secs(),
_ => false,
}
} else {
@ -347,47 +439,47 @@ impl NamedFile {
} else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) =
(last_modified, req.get_header())
{
let t1: SystemTime = m.clone().into();
let t2: SystemTime = since.clone().into();
let t1: SystemTime = (*m).into();
let t2: SystemTime = (*since).into();
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
(Ok(t1), Ok(t2)) => t1 <= t2,
(Ok(t1), Ok(t2)) => t1.as_secs() <= t2.as_secs(),
_ => false,
}
} else {
false
};
let mut resp = HttpResponse::build(self.status_code);
let mut res = HttpResponse::build(self.status_code);
if self.flags.contains(Flags::PREFER_UTF8) {
let ct = equiv_utf8_text(self.content_type.clone());
resp.header(header::CONTENT_TYPE, ct.to_string());
let ct = if self.flags.contains(Flags::PREFER_UTF8) {
equiv_utf8_text(self.content_type.clone())
} else {
resp.header(header::CONTENT_TYPE, self.content_type.to_string());
}
self.content_type
};
res.insert_header((header::CONTENT_TYPE, ct.to_string()));
if self.flags.contains(Flags::CONTENT_DISPOSITION) {
resp.header(
res.insert_header((
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
);
));
}
// default compressing
if let Some(current_encoding) = self.encoding {
resp.encoding(current_encoding);
res.insert_header((header::CONTENT_ENCODING, current_encoding.as_str()));
}
if let Some(lm) = last_modified {
resp.header(header::LAST_MODIFIED, lm.to_string());
res.insert_header((header::LAST_MODIFIED, lm.to_string()));
}
if let Some(etag) = etag {
resp.header(header::ETAG, etag.to_string());
res.insert_header((header::ETAG, etag.to_string()));
}
resp.header(header::ACCEPT_RANGES, "bytes");
res.insert_header((header::ACCEPT_RANGES, "bytes"));
let mut length = self.md.len();
let mut offset = 0;
@ -399,58 +491,41 @@ impl NamedFile {
length = ranges[0].length;
offset = ranges[0].start;
resp.encoding(ContentEncoding::Identity);
resp.header(
// don't allow compression middleware to modify partial content
res.insert_header((
header::CONTENT_ENCODING,
HeaderValue::from_static("identity"),
));
res.insert_header((
header::CONTENT_RANGE,
format!(
"bytes {}-{}/{}",
offset,
offset + length - 1,
self.md.len()
),
);
format!("bytes {}-{}/{}", offset, offset + length - 1, self.md.len()),
));
} else {
resp.header(header::CONTENT_RANGE, format!("bytes */{}", length));
return Ok(resp.status(StatusCode::RANGE_NOT_SATISFIABLE).finish());
res.insert_header((header::CONTENT_RANGE, format!("bytes */{}", length)));
return res.status(StatusCode::RANGE_NOT_SATISFIABLE).finish();
};
} else {
return Ok(resp.status(StatusCode::BAD_REQUEST).finish());
return res.status(StatusCode::BAD_REQUEST).finish();
};
};
if precondition_failed {
return Ok(resp.status(StatusCode::PRECONDITION_FAILED).finish());
return res.status(StatusCode::PRECONDITION_FAILED).finish();
} else if not_modified {
return Ok(resp.status(StatusCode::NOT_MODIFIED).finish());
return res
.status(StatusCode::NOT_MODIFIED)
.body(body::None::new())
.map_into_boxed_body();
}
let reader = ChunkedReadFile {
offset,
size: length,
file: Some(self.file),
fut: None,
counter: 0,
};
let reader = chunked::new_chunked_read(length, offset, self.file);
if offset != 0 || length != self.md.len() {
resp.status(StatusCode::PARTIAL_CONTENT);
res.status(StatusCode::PARTIAL_CONTENT);
}
Ok(resp.body(SizedStream::new(length, reader)))
}
}
impl Deref for NamedFile {
type Target = File;
fn deref(&self) -> &File {
&self.file
}
}
impl DerefMut for NamedFile {
fn deref_mut(&mut self) -> &mut File {
&mut self.file
res.body(SizedStream::new(length, reader))
}
}
@ -495,10 +570,62 @@ fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
}
impl Responder for NamedFile {
type Error = Error;
type Future = Ready<Result<HttpResponse, Error>>;
type Body = BoxBody;
fn respond_to(self, req: &HttpRequest) -> Self::Future {
ready(self.into_response(req))
fn respond_to(self, req: &HttpRequest) -> HttpResponse<Self::Body> {
self.into_response(req)
}
}
impl ServiceFactory<ServiceRequest> for NamedFile {
type Response = ServiceResponse;
type Error = Error;
type Config = ();
type Service = NamedFileService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
let service = NamedFileService {
path: self.path.clone(),
};
Box::pin(async move { Ok(service) })
}
}
#[doc(hidden)]
#[derive(Debug)]
pub struct NamedFileService {
path: PathBuf,
}
impl Service<ServiceRequest> for NamedFileService {
type Response = ServiceResponse;
type Error = Error;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
dev::always_ready!();
fn call(&self, req: ServiceRequest) -> Self::Future {
let (req, _) = req.into_parts();
let path = self.path.clone();
Box::pin(async move {
let file = NamedFile::open_async(path).await?;
let res = file.into_response(&req);
Ok(ServiceResponse::new(req, res))
})
}
}
impl HttpServiceFactory for NamedFile {
fn register(self, config: &mut AppService) {
config.register_service(
ResourceDef::root_prefix(self.path.to_string_lossy().as_ref()),
None,
self,
None,
)
}
}


@ -1,14 +1,14 @@
use std::{
path::{Path, PathBuf},
path::{Component, Path, PathBuf},
str::FromStr,
};
use actix_utils::future::{ready, Ready};
use actix_web::{dev::Payload, FromRequest, HttpRequest};
use futures_util::future::{ready, Ready};
use crate::error::UriSegmentError;
#[derive(Debug)]
#[derive(Debug, PartialEq, Eq)]
pub(crate) struct PathBufWrap(PathBuf);
impl FromStr for PathBufWrap {
@ -21,11 +21,28 @@ impl FromStr for PathBufWrap {
impl PathBufWrap {
/// Parse a path, giving the choice of allowing hidden files to be considered valid segments.
///
/// Path traversal is guarded by this method.
pub fn parse_path(path: &str, hidden_files: bool) -> Result<Self, UriSegmentError> {
let mut buf = PathBuf::new();
// equivalent to `path.split('/').count()`
let mut segment_count = path.matches('/').count() + 1;
// we can decode the whole path here (instead of per-segment decoding)
// because we will reject `%2F` in paths using `segment_count`.
let path = percent_encoding::percent_decode_str(path)
.decode_utf8()
.map_err(|_| UriSegmentError::NotValidUtf8)?;
// disallow decoding `%2F` into `/`
if segment_count != path.matches('/').count() + 1 {
return Err(UriSegmentError::BadChar('/'));
}
for segment in path.split('/') {
if segment == ".." {
segment_count -= 1;
buf.pop();
} else if !hidden_files && segment.starts_with('.') {
return Err(UriSegmentError::BadStart('.'));
@ -38,14 +55,27 @@ impl PathBufWrap {
} else if segment.ends_with('<') {
return Err(UriSegmentError::BadEnd('<'));
} else if segment.is_empty() {
segment_count -= 1;
continue;
} else if cfg!(windows) && segment.contains('\\') {
return Err(UriSegmentError::BadChar('\\'));
} else if cfg!(windows) && segment.contains(':') {
return Err(UriSegmentError::BadChar(':'));
} else {
buf.push(segment)
}
}
// make sure we agree with stdlib parser
for (i, component) in buf.components().enumerate() {
assert!(
matches!(component, Component::Normal(_)),
"component `{:?}` is not normal",
component
);
assert!(i < segment_count);
}
Ok(PathBufWrap(buf))
}
}
@ -59,10 +89,9 @@ impl AsRef<Path> for PathBufWrap {
impl FromRequest for PathBufWrap {
type Error = UriSegmentError;
type Future = Ready<Result<Self, Self::Error>>;
type Config = ();
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
ready(req.match_info().path().parse())
ready(req.match_info().unprocessed().parse())
}
}
@ -116,4 +145,46 @@ mod tests {
PathBuf::from_iter(vec!["test", ".tt"])
);
}
#[test]
fn path_traversal() {
assert_eq!(
PathBufWrap::parse_path("/../README.md", false).unwrap().0,
PathBuf::from_iter(vec!["README.md"])
);
assert_eq!(
PathBufWrap::parse_path("/../README.md", true).unwrap().0,
PathBuf::from_iter(vec!["README.md"])
);
assert_eq!(
PathBufWrap::parse_path("/../../../../../../../../../../etc/passwd", false)
.unwrap()
.0,
PathBuf::from_iter(vec!["etc/passwd"])
);
}
#[test]
#[cfg_attr(windows, should_panic)]
fn windows_drive_traversal() {
// detect issues in windows that could lead to path traversal
// see <https://github.com/SergioBenitez/Rocket/issues/1949>
assert_eq!(
PathBufWrap::parse_path("C:test.txt", false).unwrap().0,
PathBuf::from_iter(vec!["C:test.txt"])
);
assert_eq!(
PathBufWrap::parse_path("C:../whatever", false).unwrap().0,
PathBuf::from_iter(vec!["C:../whatever"])
);
assert_eq!(
PathBufWrap::parse_path(":test.txt", false).unwrap().0,
PathBuf::from_iter(vec![":test.txt"])
);
}
}


@ -1,3 +1,5 @@
use derive_more::{Display, Error};
/// HTTP Range header representation.
#[derive(Debug, Clone, Copy)]
pub struct HttpRange {
@ -8,91 +10,26 @@ pub struct HttpRange {
pub length: u64,
}
const PREFIX: &str = "bytes=";
const PREFIX_LEN: usize = 6;
#[derive(Debug, Clone, Display, Error)]
#[display(fmt = "Parse HTTP Range failed")]
pub struct ParseRangeErr(#[error(not(source))] ());
impl HttpRange {
/// Parses Range HTTP header string as per RFC 2616.
///
/// `header` is the HTTP Range header (e.g. `bytes=0-9`).
/// `size` is full size of response (file).
pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ()> {
if header.is_empty() {
return Ok(Vec::new());
pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ParseRangeErr> {
match http_range::HttpRange::parse(header, size) {
Ok(ranges) => Ok(ranges
.iter()
.map(|range| HttpRange {
start: range.start,
length: range.length,
})
.collect()),
Err(_) => Err(ParseRangeErr(())),
}
if !header.starts_with(PREFIX) {
return Err(());
}
let size_sig = size as i64;
let mut no_overlap = false;
let all_ranges: Vec<Option<HttpRange>> = header[PREFIX_LEN..]
.split(',')
.map(|x| x.trim())
.filter(|x| !x.is_empty())
.map(|ra| {
let mut start_end_iter = ra.split('-');
let start_str = start_end_iter.next().ok_or(())?.trim();
let end_str = start_end_iter.next().ok_or(())?.trim();
if start_str.is_empty() {
// If no start is specified, end specifies the
// range start relative to the end of the file.
let mut length: i64 = end_str.parse().map_err(|_| ())?;
if length > size_sig {
length = size_sig;
}
Ok(Some(HttpRange {
start: (size_sig - length) as u64,
length: length as u64,
}))
} else {
let start: i64 = start_str.parse().map_err(|_| ())?;
if start < 0 {
return Err(());
}
if start >= size_sig {
no_overlap = true;
return Ok(None);
}
let length = if end_str.is_empty() {
// If no end is specified, range extends to end of the file.
size_sig - start
} else {
let mut end: i64 = end_str.parse().map_err(|_| ())?;
if start > end {
return Err(());
}
if end >= size_sig {
end = size_sig - 1;
}
end - start + 1
};
Ok(Some(HttpRange {
start: start as u64,
length: length as u64,
}))
}
})
.collect::<Result<_, _>>()?;
let ranges: Vec<HttpRange> = all_ranges.into_iter().filter_map(|x| x).collect();
if no_overlap && ranges.is_empty() {
return Err(());
}
Ok(ranges)
}
}
@ -333,8 +270,7 @@ mod tests {
if expected.is_empty() {
continue;
} else {
assert!(
false,
panic!(
"parse({}, {}) returned error {:?}",
header,
size,
@ -346,28 +282,24 @@ mod tests {
let got = res.unwrap();
if got.len() != expected.len() {
assert!(
false,
panic!(
"len(parseRange({}, {})) = {}, want {}",
header,
size,
got.len(),
expected.len()
);
continue;
}
for i in 0..expected.len() {
if got[i].start != expected[i].start {
assert!(
false,
panic!(
"parseRange({}, {})[{}].start = {}, want {}",
header, size, i, got[i].start, expected[i].start
)
}
if got[i].length != expected[i].length {
assert!(
false,
panic!(
"parseRange({}, {})[{}].length = {}, want {}",
header, size, i, got[i].length, expected[i].length
)


@ -1,27 +1,33 @@
use std::{
fmt, io,
path::PathBuf,
rc::Rc,
task::{Context, Poll},
};
use std::{fmt, io, ops::Deref, path::PathBuf, rc::Rc};
use actix_service::Service;
use actix_web::{
dev::{ServiceRequest, ServiceResponse},
body::BoxBody,
dev::{self, Service, ServiceRequest, ServiceResponse},
error::Error,
guard::Guard,
http::{header, Method},
HttpResponse,
};
use futures_util::future::{ok, Either, LocalBoxFuture, Ready};
use futures_core::future::LocalBoxFuture;
use crate::{
named, Directory, DirectoryRenderer, FilesError, HttpService, MimeOverride,
NamedFile, PathBufWrap,
named, Directory, DirectoryRenderer, FilesError, HttpService, MimeOverride, NamedFile,
PathBufWrap, PathFilter,
};
/// Assembled file serving service.
pub struct FilesService {
#[derive(Clone)]
pub struct FilesService(pub(crate) Rc<FilesServiceInner>);
impl Deref for FilesService {
type Target = FilesServiceInner;
fn deref(&self) -> &Self::Target {
&*self.0
}
}
pub struct FilesServiceInner {
pub(crate) directory: PathBuf,
pub(crate) index: Option<String>,
pub(crate) show_index: bool,
@ -29,26 +35,56 @@ pub struct FilesService {
pub(crate) default: Option<HttpService>,
pub(crate) renderer: Rc<DirectoryRenderer>,
pub(crate) mime_override: Option<Rc<MimeOverride>>,
pub(crate) path_filter: Option<Rc<PathFilter>>,
pub(crate) file_flags: named::Flags,
pub(crate) guards: Option<Rc<dyn Guard>>,
pub(crate) hidden_files: bool,
}
type FilesServiceFuture = Either<
Ready<Result<ServiceResponse, Error>>,
LocalBoxFuture<'static, Result<ServiceResponse, Error>>,
>;
impl fmt::Debug for FilesServiceInner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("FilesServiceInner")
}
}
impl FilesService {
fn handle_err(&mut self, e: io::Error, req: ServiceRequest) -> FilesServiceFuture {
log::debug!("Failed to handle {}: {}", req.path(), e);
async fn handle_err(
&self,
err: io::Error,
req: ServiceRequest,
) -> Result<ServiceResponse, Error> {
log::debug!("error handling {}: {}", req.path(), err);
if let Some(ref mut default) = self.default {
Either::Right(default.call(req))
if let Some(ref default) = self.default {
default.call(req).await
} else {
Either::Left(ok(req.error_response(e)))
Ok(req.error_response(err))
}
}
fn serve_named_file(
&self,
req: ServiceRequest,
mut named_file: NamedFile,
) -> ServiceResponse {
if let Some(ref mime_override) = self.mime_override {
let new_disposition = mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition;
}
named_file.flags = self.file_flags;
let (req, _) = req.into_parts();
let res = named_file.into_response(&req);
ServiceResponse::new(req, res)
}
fn show_index(&self, req: ServiceRequest, path: PathBuf) -> ServiceResponse {
let dir = Directory::new(self.directory.clone(), path);
let (req, _) = req.into_parts();
(self.renderer)(&dir, &req).unwrap_or_else(|e| ServiceResponse::from_err(e, req))
}
}
impl fmt::Debug for FilesService {
@ -57,113 +93,103 @@ impl fmt::Debug for FilesService {
}
}
impl Service for FilesService {
type Request = ServiceRequest;
type Response = ServiceResponse;
impl Service<ServiceRequest> for FilesService {
type Response = ServiceResponse<BoxBody>;
type Error = Error;
type Future = FilesServiceFuture;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
dev::always_ready!();
fn call(&mut self, req: ServiceRequest) -> Self::Future {
fn call(&self, req: ServiceRequest) -> Self::Future {
let is_method_valid = if let Some(guard) = &self.guards {
// execute user defined guards
(**guard).check(req.head())
(**guard).check(&req.guard_ctx())
} else {
// default behavior
matches!(*req.method(), Method::HEAD | Method::GET)
};
if !is_method_valid {
return Either::Left(ok(req.into_response(
actix_web::HttpResponse::MethodNotAllowed()
.header(header::CONTENT_TYPE, "text/plain")
.body("Request did not meet this resource's requirements."),
)));
}
let this = self.clone();
let real_path =
match PathBufWrap::parse_path(req.match_info().path(), self.hidden_files) {
Box::pin(async move {
if !is_method_valid {
return Ok(req.into_response(
HttpResponse::MethodNotAllowed()
.insert_header(header::ContentType(mime::TEXT_PLAIN_UTF_8))
.body("Request did not meet this resource's requirements."),
));
}
let path_on_disk = match PathBufWrap::parse_path(
req.match_info().unprocessed(),
this.hidden_files,
) {
Ok(item) => item,
Err(e) => return Either::Left(ok(req.error_response(e))),
Err(err) => return Ok(req.error_response(err)),
};
// full file path
let path = match self.directory.join(&real_path).canonicalize() {
Ok(path) => path,
Err(e) => return self.handle_err(e, req),
};
if let Some(filter) = &this.path_filter {
if !filter(path_on_disk.as_ref(), req.head()) {
if let Some(ref default) = this.default {
return default.call(req).await;
} else {
return Ok(req.into_response(HttpResponse::NotFound().finish()));
}
}
}
if path.is_dir() {
if let Some(ref redir_index) = self.index {
if self.redirect_to_slash && !req.path().ends_with('/') {
// full file path
let path = this.directory.join(&path_on_disk);
if let Err(err) = path.canonicalize() {
return this.handle_err(err, req).await;
}
if path.is_dir() {
if this.redirect_to_slash
&& !req.path().ends_with('/')
&& (this.index.is_some() || this.show_index)
{
let redirect_to = format!("{}/", req.path());
return Either::Left(ok(req.into_response(
return Ok(req.into_response(
HttpResponse::Found()
.header(header::LOCATION, redirect_to)
.body("")
.into_body(),
)));
.insert_header((header::LOCATION, redirect_to))
.finish(),
));
}
let path = path.join(redir_index);
match NamedFile::open(path) {
match this.index {
Some(ref index) => {
let named_path = path.join(index);
match NamedFile::open_async(named_path).await {
Ok(named_file) => Ok(this.serve_named_file(req, named_file)),
Err(_) if this.show_index => Ok(this.show_index(req, path)),
Err(err) => this.handle_err(err, req).await,
}
}
None if this.show_index => Ok(this.show_index(req, path)),
None => Ok(ServiceResponse::from_err(
FilesError::IsDirectory,
req.into_parts().0,
)),
}
} else {
match NamedFile::open_async(&path).await {
Ok(mut named_file) => {
if let Some(ref mime_override) = self.mime_override {
if let Some(ref mime_override) = this.mime_override {
let new_disposition =
mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition;
}
named_file.flags = self.file_flags;
named_file.flags = this.file_flags;
let (req, _) = req.into_parts();
Either::Left(ok(match named_file.into_response(&req) {
Ok(item) => ServiceResponse::new(req, item),
Err(e) => ServiceResponse::from_err(e, req),
}))
let res = named_file.into_response(&req);
Ok(ServiceResponse::new(req, res))
}
Err(e) => self.handle_err(e, req),
Err(err) => this.handle_err(err, req).await,
}
} else if self.show_index {
let dir = Directory::new(self.directory.clone(), path);
let (req, _) = req.into_parts();
let x = (self.renderer)(&dir, &req);
match x {
Ok(resp) => Either::Left(ok(resp)),
Err(e) => Either::Left(ok(ServiceResponse::from_err(e, req))),
}
} else {
Either::Left(ok(ServiceResponse::from_err(
FilesError::IsDirectory,
req.into_parts().0,
)))
}
} else {
match NamedFile::open(path) {
Ok(mut named_file) => {
if let Some(ref mime_override) = self.mime_override {
let new_disposition =
mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition;
}
named_file.flags = self.file_flags;
let (req, _) = req.into_parts();
match named_file.into_response(&req) {
Ok(item) => {
Either::Left(ok(ServiceResponse::new(req.clone(), item)))
}
Err(e) => Either::Left(ok(ServiceResponse::from_err(e, req))),
}
}
Err(e) => self.handle_err(e, req),
}
}
})
}
}
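For context, the fields consulted by this dispatch logic (`index`, `show_index`, `redirect_to_slash`, `hidden_files`, `guards`, `path_filter`, `default`) are all set through the public `Files` builder. A minimal usage sketch, assuming the actix-files 0.6 builder methods named in the comments:

use actix_files::Files;
use actix_web::{guard, App, HttpServer};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().service(
            // each builder call below toggles one of the FilesServiceInner fields used above
            Files::new("/static", "./static")
                .index_file("index.html")      // `index`
                .show_files_listing()          // `show_index`
                .redirect_to_slash_directory() // `redirect_to_slash`
                .use_hidden_files()            // `hidden_files`
                .guard(guard::Get()),          // `guards`
        )
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}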

@@ -8,33 +8,31 @@ use actix_web::{
App,
};
#[actix_rt::test]
#[actix_web::test]
async fn test_utf8_file_contents() {
// use default ISO-8859-1 encoding
let mut srv =
test::init_service(App::new().service(Files::new("/", "./tests"))).await;
let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await;
let req = TestRequest::with_uri("/utf8.txt").to_request();
let res = test::call_service(&mut srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(
res.headers().get(header::CONTENT_TYPE),
Some(&HeaderValue::from_static("text/plain")),
);
// prefer UTF-8 encoding
let mut srv = test::init_service(
App::new().service(Files::new("/", "./tests").prefer_utf8(true)),
)
.await;
let req = TestRequest::with_uri("/utf8.txt").to_request();
let res = test::call_service(&mut srv, req).await;
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(
res.headers().get(header::CONTENT_TYPE),
Some(&HeaderValue::from_static("text/plain; charset=utf-8")),
);
// disable UTF-8 attribute
let srv =
test::init_service(App::new().service(Files::new("/", "./tests").prefer_utf8(false)))
.await;
let req = TestRequest::with_uri("/utf8.txt").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(
res.headers().get(header::CONTENT_TYPE),
Some(&HeaderValue::from_static("text/plain")),
);
}

@@ -0,0 +1 @@
first

@@ -0,0 +1 @@
second

@@ -0,0 +1,36 @@
use actix_files::Files;
use actix_web::{
guard::Host,
http::StatusCode,
test::{self, TestRequest},
App,
};
use bytes::Bytes;
#[actix_web::test]
async fn test_guard_filter() {
let srv = test::init_service(
App::new()
.service(Files::new("/", "./tests/fixtures/guards/first").guard(Host("first.com")))
.service(
Files::new("/", "./tests/fixtures/guards/second").guard(Host("second.com")),
),
)
.await;
let req = TestRequest::with_uri("/index.txt")
.append_header(("Host", "first.com"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(test::read_body(res).await, Bytes::from("first"));
let req = TestRequest::with_uri("/index.txt")
.append_header(("Host", "second.com"))
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(test::read_body(res).await, Bytes::from("second"));
}

@@ -0,0 +1 @@
test.png

@@ -0,0 +1 @@
// this file is empty.

@@ -0,0 +1,27 @@
use actix_files::Files;
use actix_web::{
http::StatusCode,
test::{self, TestRequest},
App,
};
#[actix_rt::test]
async fn test_directory_traversal_prevention() {
let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await;
let req =
TestRequest::with_uri("/../../../../../../../../../../../etc/passwd").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri(
"/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/%2e%2e/etc/passwd",
)
.to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
let req = TestRequest::with_uri("/%00/etc/passwd%00").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::NOT_FOUND);
}
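A related hardening option is the `path_filter` hook checked by the service above. A hedged sketch of a test that only serves `.txt` files, assuming the filter closure receives the on-disk path and request head as seen in the service code:

use actix_files::Files;
use actix_web::{test, App};

#[actix_web::test]
async fn only_serves_text_files() {
    // restrict the served files to `.txt`, on top of the built-in traversal protection
    let srv = test::init_service(App::new().service(
        Files::new("/", "./tests").path_filter(|path, _head| {
            path.extension().map_or(false, |ext| ext == "txt")
        }),
    ))
    .await;

    let req = test::TestRequest::with_uri("/utf8.txt").to_request();
    let res = test::call_service(&srv, req).await;
    assert!(res.status().is_success());
}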

@@ -1,80 +1,146 @@
# Changes
## Unreleased - 2020-xx-xx
## Unreleased - 2021-xx-xx
## 3.0.0-beta.13 - 2022-02-16
- No significant changes since `3.0.0-beta.12`.
## 3.0.0-beta.12 - 2022-01-31
- No significant changes since `3.0.0-beta.11`.
## 3.0.0-beta.11 - 2022-01-04
- Minimum supported Rust version (MSRV) is now 1.54.
## 3.0.0-beta.10 - 2021-12-27
- Update `actix-server` to `2.0.0-rc.2`. [#2550]
[#2550]: https://github.com/actix/actix-web/pull/2550
## 3.0.0-beta.9 - 2021-12-11
- No significant changes since `3.0.0-beta.8`.
## 3.0.0-beta.8 - 2021-11-30
- Update `actix-tls` to `3.0.0-rc.1`. [#2474]
[#2474]: https://github.com/actix/actix-web/pull/2474
## 3.0.0-beta.7 - 2021-11-22
- Fix compatibility with experimental `io-uring` feature of `actix-rt`. [#2408]
[#2408]: https://github.com/actix/actix-web/pull/2408
## 3.0.0-beta.6 - 2021-11-15
- `TestServer::stop` is now async and will wait for the server and system to shutdown. [#2442]
- Update `actix-server` to `2.0.0-beta.9`. [#2442]
- Minimum supported Rust version (MSRV) is now 1.52.
[#2442]: https://github.com/actix/actix-web/pull/2442
## 3.0.0-beta.5 - 2021-09-09
- Minimum supported Rust version (MSRV) is now 1.51.
## 3.0.0-beta.4 - 2021-04-02
- Added `TestServer::client_headers` method. [#2097]
[#2097]: https://github.com/actix/actix-web/pull/2097
## 3.0.0-beta.3 - 2021-03-09
- No notable changes.
## 3.0.0-beta.2 - 2021-02-10
- No notable changes.
## 3.0.0-beta.1 - 2021-01-07
- Update `bytes` to `1.0`. [#1813]
[#1813]: https://github.com/actix/actix-web/pull/1813
## 2.1.0 - 2020-11-25
* Add ability to set address for `TestServer`. [#1645]
* Upgrade `base64` to `0.13`.
* Upgrade `serde_urlencoded` to `0.7`. [#1773]
- Add ability to set address for `TestServer`. [#1645]
- Upgrade `base64` to `0.13`.
- Upgrade `serde_urlencoded` to `0.7`. [#1773]
[#1773]: https://github.com/actix/actix-web/pull/1773
[#1645]: https://github.com/actix/actix-web/pull/1645
## 2.0.0 - 2020-09-11
* Update actix-codec and actix-utils dependencies.
- Update actix-codec and actix-utils dependencies.
## 2.0.0-alpha.1 - 2020-05-23
* Update the `time` dependency to 0.2.7
* Update `actix-connect` dependency to 2.0.0-alpha.2
* Make `test_server` `async` fn.
* Bump minimum supported Rust version to 1.40
* Replace deprecated `net2` crate with `socket2`
* Update `base64` dependency to 0.12
* Update `env_logger` dependency to 0.7
- Update the `time` dependency to 0.2.7
- Update `actix-connect` dependency to 2.0.0-alpha.2
- Make `test_server` `async` fn.
- Bump minimum supported Rust version to 1.40
- Replace deprecated `net2` crate with `socket2`
- Update `base64` dependency to 0.12
- Update `env_logger` dependency to 0.7
## 1.0.0 - 2019-12-13
* Replaced `TestServer::start()` with `test_server()`
- Replaced `TestServer::start()` with `test_server()`
## 1.0.0-alpha.3 - 2019-12-07
* Migrate to `std::future`
- Migrate to `std::future`
## 0.2.5 - 2019-09-17
* Update serde_urlencoded to "0.6.1"
* Increase TestServerRuntime timeouts from 500ms to 3000ms
* Do not override current `System`
- Update serde_urlencoded to "0.6.1"
- Increase TestServerRuntime timeouts from 500ms to 3000ms
- Do not override current `System`
## 0.2.4 - 2019-07-18
* Update actix-server to 0.6
- Update actix-server to 0.6
## 0.2.3 - 2019-07-16
* Add `delete`, `options`, `patch` methods to `TestServerRunner`
- Add `delete`, `options`, `patch` methods to `TestServerRunner`
## 0.2.2 - 2019-06-16
* Add .put() and .sput() methods
- Add .put() and .sput() methods
## 0.2.1 - 2019-06-05
* Add license files
- Add license files
## 0.2.0 - 2019-05-12
* Update awc and actix-http deps
- Update awc and actix-http deps
## 0.1.1 - 2019-04-24
* Always make new connection for http client
- Always make new connection for http client
## 0.1.0 - 2019-04-16
* No changes
- No changes
## 0.1.0-alpha.3 - 2019-04-02
* Request functions accept path #743
- Request functions accept path #743
## 0.1.0-alpha.2 - 2019-03-29
* Added TestServerRuntime::load_body() method
* Update actix-http and awc libraries
- Added TestServerRuntime::load_body() method
- Update actix-http and awc libraries
## 0.1.0-alpha.1 - 2019-03-28
* Initial impl
- Initial impl

@@ -1,18 +1,18 @@
[package]
name = "actix-http-test"
version = "2.1.0"
version = "3.0.0-beta.13"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Various helpers for Actix applications to use during testing"
readme = "README.md"
keywords = ["http", "web", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-http-test/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
categories = [
"network-programming",
"asynchronous",
"web-programming::http-server",
"web-programming::websocket",
]
license = "MIT OR Apache-2.0"
exclude = [".gitignore", ".cargo/config"]
edition = "2018"
[package.metadata.docs.rs]
@@ -26,31 +26,30 @@ path = "src/lib.rs"
default = []
# openssl
openssl = ["open-ssl", "awc/openssl"]
openssl = ["tls-openssl", "awc/openssl"]
[dependencies]
actix-service = "1.0.6"
actix-codec = "0.3.0"
actix-connect = "2.0.0"
actix-utils = "2.0.0"
actix-rt = "1.1.1"
actix-server = "1.0.0"
actix-testing = "1.0.0"
awc = "2.0.0"
actix-service = "2.0.0"
actix-codec = "0.5"
actix-tls = "3"
actix-utils = "3.0.0"
actix-rt = "2.2"
actix-server = "2"
awc = { version = "3.0.0-beta.21", default-features = false }
base64 = "0.13"
bytes = "0.5.3"
futures-core = { version = "0.3.5", default-features = false }
http = "0.2.0"
bytes = "1"
futures-core = { version = "0.3.7", default-features = false }
http = "0.2.5"
log = "0.4"
socket2 = "0.3"
socket2 = "0.4"
serde = "1.0"
serde_json = "1.0"
slab = "0.4"
serde_urlencoded = "0.7"
time = { version = "0.2.7", default-features = false, features = ["std"] }
open-ssl = { version = "0.10", package = "openssl", optional = true }
tls-openssl = { version = "0.10.9", package = "openssl", optional = true }
tokio = { version = "1.8.4", features = ["sync"] }
[dev-dependencies]
actix-web = "3.0.0"
actix-http = "2.0.0"
actix-web = { version = "4.0.0", default-features = false, features = ["cookies"] }
actix-http = "3.0.0"

@@ -3,13 +3,15 @@
> Various helpers for Actix applications to use during testing.
[![crates.io](https://img.shields.io/crates/v/actix-http-test?label=latest)](https://crates.io/crates/actix-http-test)
[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=2.1.0)](https://docs.rs/actix-http-test/2.1.0)
![Apache 2.0 or MIT licensed](https://img.shields.io/crates/l/actix-http-test)
[![Dependency Status](https://deps.rs/crate/actix-http-test/2.1.0/status.svg)](https://deps.rs/crate/actix-http-test/2.1.0)
[![Join the chat at https://gitter.im/actix/actix-web](https://badges.gitter.im/actix/actix-web.svg)](https://gitter.im/actix/actix-web?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=3.0.0-beta.13)](https://docs.rs/actix-http-test/3.0.0-beta.13)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http-test)
<br>
[![Dependency Status](https://deps.rs/crate/actix-http-test/3.0.0-beta.13/status.svg)](https://deps.rs/crate/actix-http-test/3.0.0-beta.13)
[![Download](https://img.shields.io/crates/d/actix-http-test.svg)](https://crates.io/crates/actix-http-test)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
## Documentation & Resources
- [API Documentation](https://docs.rs/actix-http-test)
- [Chat on Gitter](https://gitter.im/actix/actix-web)
- Minimum Supported Rust Version (MSRV): 1.42.0
- Minimum Supported Rust Version (MSRV): 1.54

@@ -1,45 +1,48 @@
//! Various helpers for Actix applications to use during testing.
#![deny(rust_2018_idioms)]
#![deny(rust_2018_idioms, nonstandard_style)]
#![warn(future_incompatible)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
use std::sync::mpsc;
use std::{net, thread, time};
#[cfg(feature = "openssl")]
extern crate tls_openssl as openssl;
use std::{net, thread, time::Duration};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_rt::{net::TcpStream, System};
use actix_server::{Server, ServiceFactory};
use awc::{error::PayloadError, ws, Client, ClientRequest, ClientResponse, Connector};
use actix_server::{Server, ServerServiceFactory};
use awc::{
error::PayloadError, http::header::HeaderMap, ws, Client, ClientRequest, ClientResponse,
Connector,
};
use bytes::Bytes;
use futures_core::stream::Stream;
use http::Method;
use socket2::{Domain, Protocol, Socket, Type};
use tokio::sync::mpsc;
pub use actix_testing::*;
/// Start test server
/// Start test server.
///
/// `TestServer` is very simple test server that simplify process of writing
/// integration tests cases for actix web applications.
/// `TestServer` is a very simple test server that simplifies the process of writing integration
/// test cases for HTTP applications.
///
/// # Examples
///
/// ```rust
/// ```no_run
/// use actix_http::HttpService;
/// use actix_http_test::TestServer;
/// use actix_http_test::test_server;
/// use actix_web::{web, App, HttpResponse, Error};
///
/// async fn my_handler() -> Result<HttpResponse, Error> {
/// Ok(HttpResponse::Ok().into())
/// }
///
/// #[actix_rt::test]
/// #[actix_web::test]
/// async fn test_example() {
/// let mut srv = TestServer::start(
/// || HttpService::new(
/// App::new().service(
/// web::resource("/").to(my_handler))
/// let mut srv = TestServer::start(||
/// HttpService::new(
/// App::new().service(web::resource("/").to(my_handler))
/// )
/// );
///
@@ -48,88 +51,91 @@ pub use actix_testing::*;
/// assert!(response.status().is_success());
/// }
/// ```
pub async fn test_server<F: ServiceFactory<TcpStream>>(factory: F) -> TestServer {
pub async fn test_server<F: ServerServiceFactory<TcpStream>>(factory: F) -> TestServer {
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
test_server_with_addr(tcp, factory).await
}
/// Start [`test server`](test_server()) on a concrete Address
pub async fn test_server_with_addr<F: ServiceFactory<TcpStream>>(
/// Start [`test server`](test_server()) on an existing address binding.
pub async fn test_server_with_addr<F: ServerServiceFactory<TcpStream>>(
tcp: net::TcpListener,
factory: F,
) -> TestServer {
let (tx, rx) = mpsc::channel();
let (started_tx, started_rx) = std::sync::mpsc::channel();
let (thread_stop_tx, thread_stop_rx) = mpsc::channel(1);
// run server in separate thread
thread::spawn(move || {
let sys = System::new("actix-test-server");
let local_addr = tcp.local_addr().unwrap();
System::new().block_on(async move {
let local_addr = tcp.local_addr().unwrap();
Server::build()
.listen("test", tcp, factory)?
.workers(1)
.disable_signals()
.start();
let srv = Server::build()
.workers(1)
.disable_signals()
.system_exit()
.listen("test", tcp, factory)
.expect("test server could not be created");
tx.send((System::current(), local_addr)).unwrap();
sys.run()
let srv = srv.run();
started_tx
.send((System::current(), srv.handle(), local_addr))
.unwrap();
// drive server loop
srv.await.unwrap();
});
// notify TestServer that server and system have shut down
// all thread managed resources should be dropped at this point
let _ = thread_stop_tx.send(());
});
let (system, addr) = rx.recv().unwrap();
let (system, server, addr) = started_rx.recv().unwrap();
let client = {
#[cfg(feature = "openssl")]
let connector = {
#[cfg(feature = "openssl")]
{
use open_ssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};
let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
builder.set_verify(SslVerifyMode::NONE);
let _ = builder
.set_alpn_protos(b"\x02h2\x08http/1.1")
.map_err(|e| log::error!("Can not set alpn protocol: {:?}", e));
Connector::new()
.conn_lifetime(time::Duration::from_secs(0))
.timeout(time::Duration::from_millis(30000))
.ssl(builder.build())
.finish()
}
#[cfg(not(feature = "openssl"))]
{
Connector::new()
.conn_lifetime(time::Duration::from_secs(0))
.timeout(time::Duration::from_millis(30000))
.finish()
}
let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
builder.set_verify(SslVerifyMode::NONE);
let _ = builder
.set_alpn_protos(b"\x02h2\x08http/1.1")
.map_err(|e| log::error!("Can not set alpn protocol: {:?}", e));
Connector::new()
.conn_lifetime(Duration::from_secs(0))
.timeout(Duration::from_millis(30000))
.openssl(builder.build())
};
#[cfg(not(feature = "openssl"))]
let connector = {
Connector::new()
.conn_lifetime(Duration::from_secs(0))
.timeout(Duration::from_millis(30000))
};
Client::builder().connector(connector).finish()
};
actix_connect::start_default_resolver().await.unwrap();
TestServer {
addr,
server,
client,
system,
addr,
thread_stop_rx,
}
}
/// Get first available unused address
pub fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket =
Socket::new(Domain::ipv4(), Type::stream(), Some(Protocol::tcp())).unwrap();
socket.bind(&addr.into()).unwrap();
socket.set_reuse_address(true).unwrap();
let tcp = socket.into_tcp_listener();
tcp.local_addr().unwrap()
}
/// Test server controller
pub struct TestServer {
server: actix_server::ServerHandle,
client: awc::Client,
system: actix_rt::System,
addr: net::SocketAddr,
client: Client,
system: System,
thread_stop_rx: mpsc::Receiver<()>,
}
impl TestServer {
@@ -147,7 +153,7 @@ impl TestServer {
}
}
/// Construct test https server url
/// Construct test HTTPS server URL.
pub fn surl(&self, uri: &str) -> String {
if uri.starts_with('/') {
format!("https://localhost:{}{}", self.addr.port(), uri)
@@ -161,7 +167,7 @@ impl TestServer {
self.client.get(self.url(path.as_ref()).as_str())
}
/// Create https `GET` request
/// Create HTTPS `GET` request
pub fn sget<S: AsRef<str>>(&self, path: S) -> ClientRequest {
self.client.get(self.surl(path.as_ref()).as_str())
}
@@ -171,7 +177,7 @@ impl TestServer {
self.client.post(self.url(path.as_ref()).as_str())
}
/// Create https `POST` request
/// Create HTTPS `POST` request
pub fn spost<S: AsRef<str>>(&self, path: S) -> ClientRequest {
self.client.post(self.surl(path.as_ref()).as_str())
}
@@ -181,7 +187,7 @@ impl TestServer {
self.client.head(self.url(path.as_ref()).as_str())
}
/// Create https `HEAD` request
/// Create HTTPS `HEAD` request
pub fn shead<S: AsRef<str>>(&self, path: S) -> ClientRequest {
self.client.head(self.surl(path.as_ref()).as_str())
}
@@ -191,7 +197,7 @@ impl TestServer {
self.client.put(self.url(path.as_ref()).as_str())
}
/// Create https `PUT` request
/// Create HTTPS `PUT` request
pub fn sput<S: AsRef<str>>(&self, path: S) -> ClientRequest {
self.client.put(self.surl(path.as_ref()).as_str())
}
@@ -201,7 +207,7 @@ impl TestServer {
self.client.patch(self.url(path.as_ref()).as_str())
}
/// Create https `PATCH` request
/// Create HTTPS `PATCH` request
pub fn spatch<S: AsRef<str>>(&self, path: S) -> ClientRequest {
self.client.patch(self.surl(path.as_ref()).as_str())
}
@@ -211,7 +217,7 @@ impl TestServer {
self.client.delete(self.url(path.as_ref()).as_str())
}
/// Create https `DELETE` request
/// Create HTTPS `DELETE` request
pub fn sdelete<S: AsRef<str>>(&self, path: S) -> ClientRequest {
self.client.delete(self.surl(path.as_ref()).as_str())
}
@@ -221,12 +227,12 @@ impl TestServer {
self.client.options(self.url(path.as_ref()).as_str())
}
/// Create https `OPTIONS` request
/// Create HTTPS `OPTIONS` request
pub fn soptions<S: AsRef<str>>(&self, path: S) -> ClientRequest {
self.client.options(self.surl(path.as_ref()).as_str())
}
/// Connect to test http server
/// Connect to test HTTP server
pub fn request<S: AsRef<str>>(&self, method: Method, path: S) -> ClientRequest {
self.client.request(method, path.as_ref())
}
@@ -241,33 +247,66 @@ impl TestServer {
response.body().limit(10_485_760).await
}
/// Connect to websocket server at a given path
/// Connect to WebSocket server at a given path.
pub async fn ws_at(
&mut self,
path: &str,
) -> Result<Framed<impl AsyncRead + AsyncWrite, ws::Codec>, awc::error::WsClientError>
{
) -> Result<Framed<impl AsyncRead + AsyncWrite, ws::Codec>, awc::error::WsClientError> {
let url = self.url(path);
let connect = self.client.ws(url).connect();
connect.await.map(|(_, framed)| framed)
}
/// Connect to a websocket server
/// Connect to a WebSocket server.
pub async fn ws(
&mut self,
) -> Result<Framed<impl AsyncRead + AsyncWrite, ws::Codec>, awc::error::WsClientError>
{
) -> Result<Framed<impl AsyncRead + AsyncWrite, ws::Codec>, awc::error::WsClientError> {
self.ws_at("/").await
}
/// Stop http server
fn stop(&mut self) {
/// Get the default `HeaderMap` of the `Client`.
///
/// Returns `Some(&mut HeaderMap)` only while the `Client` object is unique, i.e. no other
/// clone of the client exists at the same time.
pub fn client_headers(&mut self) -> Option<&mut HeaderMap> {
self.client.headers()
}
/// Stop HTTP server.
///
/// Waits for spawned `Server` and `System` to (force) shutdown.
pub async fn stop(&mut self) {
// signal server to stop
self.server.stop(false).await;
// also signal system to stop
// though this is handled by `ServerBuilder::exit_system` too
self.system.stop();
// wait for thread to be stopped but don't care about result
let _ = self.thread_stop_rx.recv().await;
}
}
impl Drop for TestServer {
fn drop(&mut self) {
self.stop()
// calls in this Drop impl should be enough to shut down the server, system, and thread
// without needing to await anything
// signal server to stop
let _ = self.server.stop(true);
// signal system to stop
self.system.stop();
}
}
/// Get a localhost socket address with random, unused port.
pub fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = Socket::new(Domain::IPV4, Type::STREAM, Some(Protocol::TCP)).unwrap();
socket.bind(&addr.into()).unwrap();
socket.set_reuse_address(true).unwrap();
let tcp = net::TcpListener::from(socket);
tcp.local_addr().unwrap()
}
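A short sketch of how `unused_addr` and `test_server_with_addr` can be combined; it assumes the port reserved by `unused_addr` is still free when the test rebinds it, and mirrors the closure style used in the actix-http examples:

use std::convert::Infallible;

use actix_http::{HttpService, Request, Response};
use actix_http_test::{test_server_with_addr, unused_addr};

#[actix_rt::test]
async fn serves_on_reserved_port() {
    // reserve a port up front so the test knows the address before the server starts
    let addr = unused_addr();
    let tcp = std::net::TcpListener::bind(addr).unwrap();

    let mut srv = test_server_with_addr(tcp, || {
        HttpService::build()
            .h1(|_req: Request| async move { Ok::<_, Infallible>(Response::ok()) })
            .tcp()
    })
    .await;

    let res = srv.get("/").send().await.unwrap();
    assert!(res.status().is_success());

    srv.stop().await;
}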

File diff suppressed because it is too large
@@ -1,21 +1,26 @@
[package]
name = "actix-http"
version = "2.2.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
version = "3.0.0"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "HTTP primitives for the Actix ecosystem"
readme = "README.md"
keywords = ["actix", "http", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-http/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
categories = [
"network-programming",
"asynchronous",
"web-programming::http-server",
"web-programming::websocket",
]
license = "MIT OR Apache-2.0"
edition = "2018"
[package.metadata.docs.rs]
features = ["openssl", "rustls", "compress", "secure-cookies", "actors"]
# features that docs.rs will build with
features = ["http2", "openssl", "rustls", "compress-brotli", "compress-gzip", "compress-zstd"]
[lib]
name = "actix_http"
@@ -24,80 +29,101 @@ path = "src/lib.rs"
[features]
default = []
# openssl
openssl = ["actix-tls/openssl", "actix-connect/openssl"]
# HTTP/2 protocol support
http2 = ["h2"]
# rustls support
rustls = ["actix-tls/rustls", "actix-connect/rustls"]
# WebSocket protocol implementation
ws = [
"local-channel",
"base64",
"rand",
"sha-1",
]
# enable compression support
compress = ["flate2", "brotli2"]
# TLS via OpenSSL
openssl = ["actix-tls/accept", "actix-tls/openssl"]
# support for secure cookies
secure-cookies = ["cookie/secure"]
# TLS via Rustls
rustls = ["actix-tls/accept", "actix-tls/rustls"]
# support for actix Actor messages
actors = ["actix"]
# Compression codecs
compress-brotli = ["__compress", "brotli"]
compress-gzip = ["__compress", "flate2"]
compress-zstd = ["__compress", "zstd"]
# Internal (PRIVATE!) features used to aid testing and checking feature status.
# Don't rely on these whatsoever. They are semver-exempt and may disappear at any time.
__compress = []
[dependencies]
actix-service = "1.0.6"
actix-codec = "0.3.0"
actix-connect = "2.0.0"
actix-utils = "2.0.0"
actix-rt = "1.0.0"
actix-threadpool = "0.3.1"
actix-tls = { version = "2.0.0", optional = true }
actix = { version = "0.10.0", optional = true }
actix-service = "2"
actix-codec = "0.5"
actix-utils = "3"
actix-rt = { version = "2.2", default-features = false }
base64 = "0.13"
ahash = "0.7"
bitflags = "1.2"
bytes = "0.5.3"
cookie = { version = "0.14.1", features = ["percent-encode"] }
copyless = "0.1.4"
derive_more = "0.99.2"
either = "1.5.3"
bytes = "1"
bytestring = "1"
derive_more = "0.99.5"
encoding_rs = "0.8"
futures-channel = { version = "0.3.5", default-features = false }
futures-core = { version = "0.3.5", default-features = false }
futures-util = { version = "0.3.5", default-features = false }
fxhash = "0.2.1"
h2 = "0.2.1"
http = "0.2.0"
httparse = "1.3"
indexmap = "1.3"
itoa = "0.4"
lazy_static = "1.4"
language-tags = "0.2"
futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
http = "0.2.5"
httparse = "1.5.1"
httpdate = "1.0.1"
itoa = "1"
language-tags = "0.3"
log = "0.4"
mime = "0.3"
percent-encoding = "2.1"
pin-project = "1.0.0"
rand = "0.8"
regex = "1.3"
serde = "1.0"
serde_json = "1.0"
sha-1 = "0.9"
slab = "0.4"
serde_urlencoded = "0.7"
time = { version = "0.2.7", default-features = false, features = ["std"] }
pin-project-lite = "0.2"
smallvec = "1.6.1"
# compression
brotli2 = { version="0.3.2", optional = true }
# http2
h2 = { version = "0.3.9", optional = true }
# websockets
local-channel = { version = "0.1", optional = true }
base64 = { version = "0.13", optional = true }
rand = { version = "0.8", optional = true }
sha-1 = { version = "0.10", optional = true }
# openssl/rustls
actix-tls = { version = "3", default-features = false, optional = true }
# compress-*
brotli = { version = "3.3.3", optional = true }
flate2 = { version = "1.0.13", optional = true }
zstd = { version = "0.10", optional = true }
[dev-dependencies]
actix-server = "1.0.1"
actix-connect = { version = "2.0.0", features = ["openssl"] }
actix-http-test = { version = "2.0.0", features = ["openssl"] }
actix-tls = { version = "2.0.0", features = ["openssl"] }
criterion = "0.3"
env_logger = "0.7"
serde_derive = "1.0"
open-ssl = { version="0.10", package = "openssl" }
rust-tls = { version="0.18", package = "rustls" }
actix-http-test = { version = "3.0.0-beta.13", features = ["openssl"] }
actix-server = "2"
actix-tls = { version = "3", features = ["openssl"] }
actix-web = "4.0.0"
async-stream = "0.3"
criterion = { version = "0.3", features = ["html_reports"] }
env_logger = "0.9"
futures-util = { version = "0.3.7", default-features = false, features = ["alloc"] }
memchr = "2.4"
once_cell = "1.9"
rcgen = "0.8"
regex = "1.3"
rustls-pemfile = "0.2"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
static_assertions = "1"
tls-openssl = { package = "openssl", version = "0.10.9" }
tls-rustls = { package = "rustls", version = "0.20.0" }
tokio = { version = "1.8.4", features = ["net", "rt", "macros"] }
[[example]]
name = "ws"
required-features = ["rustls"]
[[bench]]
name = "content-length"
name = "write-camel-case"
harness = false
[[bench]]
@@ -107,3 +133,7 @@ harness = false
[[bench]]
name = "uninit-headers"
harness = false
[[bench]]
name = "quality-value"
harness = false

@@ -3,16 +3,18 @@
> HTTP primitives for the Actix ecosystem.
[![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http)
[![Documentation](https://docs.rs/actix-http/badge.svg?version=2.2.0)](https://docs.rs/actix-http/2.2.0)
![Apache 2.0 or MIT licensed](https://img.shields.io/crates/l/actix-http)
[![Dependency Status](https://deps.rs/crate/actix-http/2.2.0/status.svg)](https://deps.rs/crate/actix-http/2.2.0)
[![Join the chat at https://gitter.im/actix/actix-web](https://badges.gitter.im/actix/actix-web.svg)](https://gitter.im/actix/actix-web?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.0.0)](https://docs.rs/actix-http/3.0.0)
[![Version](https://img.shields.io/badge/rustc-1.54+-ab6000.svg)](https://blog.rust-lang.org/2021/05/06/Rust-1.54.0.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-http/3.0.0/status.svg)](https://deps.rs/crate/actix-http/3.0.0)
[![Download](https://img.shields.io/crates/d/actix-http.svg)](https://crates.io/crates/actix-http)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
## Documentation & Resources
- [API Documentation](https://docs.rs/actix-http)
- [Chat on Gitter](https://gitter.im/actix/actix-web)
- Minimum Supported Rust Version (MSRV): 1.42.0
- Minimum Supported Rust Version (MSRV): 1.54
## Example
@@ -52,8 +54,8 @@ async fn main() -> io::Result<()> {
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
- Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
- MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option.

@@ -1,291 +0,0 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use bytes::BytesMut;
// benchmark sending all requests at the same time
fn bench_write_content_length(c: &mut Criterion) {
let mut group = c.benchmark_group("write_content_length");
let sizes = [
0, 1, 11, 83, 101, 653, 1001, 6323, 10001, 56329, 100001, 123456, 98724245,
4294967202,
];
for i in sizes.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_content_length(i, &mut b)
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_content_length(i, &mut b)
})
});
group.bench_with_input(BenchmarkId::new("itoa", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_itoa::write_content_length(i, &mut b)
})
});
}
group.finish();
}
criterion_group!(benches, bench_write_content_length);
criterion_main!(benches);
mod _itoa {
use bytes::{BufMut, BytesMut};
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
let mut buf = itoa::Buffer::new();
bytes.put_slice(b"\r\ncontent-length: ");
bytes.put_slice(buf.format(n).as_bytes());
bytes.put_slice(b"\r\n");
}
}
mod _new {
use bytes::{BufMut, BytesMut};
const DIGITS_START: u8 = b'0';
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
bytes.put_slice(b"\r\ncontent-length: ");
if n < 10 {
bytes.put_u8(DIGITS_START + (n as u8));
} else if n < 100 {
let n = n as u8;
let d10 = n / 10;
let d1 = n % 10;
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1000 {
let n = n as u16;
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 10_000 {
let n = n as u16;
let d1000 = (n / 1000) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 100_000 {
let n = n as u32;
let d10000 = (n / 10000) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1_000_000 {
let n = n as u32;
let d100000 = (n / 100000) as u8;
let d10000 = ((n / 10000) % 10) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100000);
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else {
write_usize(n, bytes);
}
bytes.put_slice(b"\r\n");
}
fn write_usize(n: usize, bytes: &mut BytesMut) {
let mut n = n;
// 20 chars is max length of a usize (2^64)
// digits will be added to the buffer from lsd to msd
let mut buf = BytesMut::with_capacity(20);
while n > 9 {
// "pop" the least-significant digit
let lsd = (n % 10) as u8;
// remove the lsd from n
n = n / 10;
buf.put_u8(DIGITS_START + lsd);
}
// put msd to result buffer
bytes.put_u8(DIGITS_START + (n as u8));
// put, in reverse (msd to lsd), remaining digits to buffer
for i in (0..buf.len()).rev() {
bytes.put_u8(buf[i]);
}
}
}
mod _original {
use std::{mem, ptr, slice};
use bytes::{BufMut, BytesMut};
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
if n < 10 {
let mut buf: [u8; 21] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'\r', b'\n',
];
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else if n < 100 {
let mut buf: [u8; 22] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'\r', b'\n',
];
let d1 = n << 1;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(18),
2,
);
}
bytes.put_slice(&buf);
} else if n < 1000 {
let mut buf: [u8; 23] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'0', b'\r',
b'\n',
];
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(19),
2,
)
};
// decode last 1
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else {
bytes.put_slice(b"\r\ncontent-length: ");
convert_usize(n, bytes);
}
}
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
let mut curr: isize = 39;
let mut buf: [u8; 41] = unsafe { mem::MaybeUninit::uninit().assume_init() };
buf[39] = b'\r';
buf[40] = b'\n';
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
// eagerly decode 4 characters at a time
while n >= 10_000 {
let rem = (n % 10_000) as isize;
n /= 10_000;
let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1;
curr -= 4;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
ptr::copy_nonoverlapping(
lut_ptr.offset(d2),
buf_ptr.offset(curr + 2),
2,
);
}
}
// if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
unsafe {
bytes.extend_from_slice(slice::from_raw_parts(
buf_ptr.offset(curr),
41 - curr as usize,
));
}
}
}
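As a quick sanity check (not part of the benchmark), all three variants above should emit byte-identical header fragments. A sketch, assuming the `_original`, `_new`, and `_itoa` modules are in scope:

#[test]
fn variants_agree() {
    use bytes::BytesMut;

    for n in [0usize, 9, 42, 1_000, 98_724_245] {
        let mut original = BytesMut::with_capacity(35);
        let mut new = BytesMut::with_capacity(35);
        let mut itoa_buf = BytesMut::with_capacity(35);

        _original::write_content_length(n, &mut original);
        _new::write_content_length(n, &mut new);
        _itoa::write_content_length(n, &mut itoa_buf);

        // all three produce "\r\ncontent-length: <n>\r\n"
        assert_eq!(original, new);
        assert_eq!(new, itoa_buf);
        assert_eq!(
            std::str::from_utf8(&new).unwrap(),
            format!("\r\ncontent-length: {}\r\n", n)
        );
    }
}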

@@ -0,0 +1,95 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
const CODES: &[u16] = &[0, 1000, 201, 800, 550];
fn bench_quality_display_impls(c: &mut Criterion) {
let mut group = c.benchmark_group("quality value display impls");
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("New (fast?)", i), i, |b, &i| {
b.iter(|| _new::Quality(i).to_string())
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| _naive::Quality(i).to_string())
});
}
group.finish();
}
criterion_group!(benches, bench_quality_display_impls);
criterion_main!(benches);
mod _new {
use std::fmt;
pub struct Quality(pub(crate) u16);
impl fmt::Display for Quality {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0 {
0 => f.write_str("0"),
1000 => f.write_str("1"),
// some number in the range 1..=999
x => {
f.write_str("0.")?;
// this implementation avoids string allocation otherwise required
// for `.trim_end_matches('0')`
if x < 10 {
f.write_str("00")?;
// 0 is handled so it's not possible to have a trailing 0, we can just return
itoa_fmt(f, x)
} else if x < 100 {
f.write_str("0")?;
if x % 10 == 0 {
// trailing 0, divide by 10 and write
itoa_fmt(f, x / 10)
} else {
itoa_fmt(f, x)
}
} else {
// x is in range 101..=999
if x % 100 == 0 {
// two trailing 0s, divide by 100 and write
itoa_fmt(f, x / 100)
} else if x % 10 == 0 {
// one trailing 0, divide by 10 and write
itoa_fmt(f, x / 10)
} else {
itoa_fmt(f, x)
}
}
}
}
}
}
pub fn itoa_fmt<W: fmt::Write, V: itoa::Integer>(mut wr: W, value: V) -> fmt::Result {
let mut buf = itoa::Buffer::new();
wr.write_str(buf.format(value))
}
}
mod _naive {
use std::fmt;
pub struct Quality(pub(crate) u16);
impl fmt::Display for Quality {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.0 {
0 => f.write_str("0"),
1000 => f.write_str("1"),
x => {
write!(f, "{}", format!("{:03}", x).trim_end_matches('0'))
}
}
}
}
}
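A few spot checks of the two `Display` implementations compared above, shown as a sketch that assumes both modules are in scope:

#[test]
fn display_impls_agree() {
    for q in [0u16, 1, 10, 75, 200, 333, 999, 1000] {
        assert_eq!(_new::Quality(q).to_string(), _naive::Quality(q).to_string());
    }

    // q-values render without trailing zeros
    assert_eq!(_new::Quality(0).to_string(), "0");
    assert_eq!(_new::Quality(1000).to_string(), "1");
    assert_eq!(_new::Quality(75).to_string(), "0.075");
    assert_eq!(_new::Quality(200).to_string(), "0.2");
}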

@@ -176,7 +176,7 @@ mod _original {
buf[5] = b'0';
buf[7] = b'9';
}
_ => (),
_ => {}
}
let mut curr: isize = 12;
@@ -189,11 +189,7 @@ mod _original {
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
}
// decode last 1 or 2 chars
@@ -206,11 +202,7 @@ mod _original {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
}
}

@@ -54,15 +54,10 @@ const EMPTY_HEADER_INDEX: HeaderIndex = {
value: (0, 0),
};
const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] =
[EMPTY_HEADER_INDEX; MAX_HEADERS];
const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] = [EMPTY_HEADER_INDEX; MAX_HEADERS];
impl HeaderIndex {
fn record(
bytes: &[u8],
headers: &[httparse::Header<'_>],
indices: &mut [HeaderIndex],
) {
fn record(bytes: &[u8], headers: &[httparse::Header<'_>], indices: &mut [HeaderIndex]) {
let bytes_ptr = bytes.as_ptr() as usize;
for (header, indices) in headers.iter().zip(indices.iter_mut()) {
let name_start = header.name.as_ptr() as usize - bytes_ptr;
@@ -78,12 +73,12 @@ impl HeaderIndex {
// test cases taken from:
// https://github.com/seanmonstar/httparse/blob/master/benches/parse.rs
const REQ_SHORT: &'static [u8] = b"\
const REQ_SHORT: &[u8] = b"\
GET / HTTP/1.0\r\n\
Host: example.com\r\n\
Cookie: session=60; user_id=1\r\n\r\n";
const REQ: &'static [u8] = b"\
const REQ: &[u8] = b"\
GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n\
Host: www.kittyhell.com\r\n\
User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\
@@ -119,6 +114,8 @@ mod _original {
use std::mem::MaybeUninit;
pub fn parse_headers(src: &mut BytesMut) -> usize {
#![allow(clippy::uninit_assumed_init)]
let mut headers: [HeaderIndex; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };

@@ -0,0 +1,93 @@
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
fn bench_write_camel_case(c: &mut Criterion) {
let mut group = c.benchmark_group("write_camel_case");
let names = ["connection", "Transfer-Encoding", "transfer-encoding"];
for &i in &names {
let bts = i.as_bytes();
group.bench_with_input(BenchmarkId::new("Original", i), bts, |b, bts| {
b.iter(|| {
let mut buf = black_box([0; 24]);
_original::write_camel_case(black_box(bts), &mut buf)
});
});
group.bench_with_input(BenchmarkId::new("New", i), bts, |b, bts| {
b.iter(|| {
let mut buf = black_box([0; 24]);
let len = black_box(bts.len());
_new::write_camel_case(black_box(bts), buf.as_mut_ptr(), len)
});
});
}
group.finish();
}
criterion_group!(benches, bench_write_camel_case);
criterion_main!(benches);
mod _new {
pub fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) {
// first copy entire (potentially wrong) slice to output
let buffer = unsafe {
std::ptr::copy_nonoverlapping(value.as_ptr(), buf, len);
std::slice::from_raw_parts_mut(buf, len)
};
let mut iter = value.iter();
// first character should be uppercase
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[0] = c & 0b1101_1111;
}
// track 1 ahead of the current position since that's the location being assigned to
let mut index = 2;
// remaining characters after hyphens should also be uppercase
while let Some(&c) = iter.next() {
if c == b'-' {
// advance iter by one and uppercase if needed
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[index] = c & 0b1101_1111;
}
}
index += 1;
}
}
}
mod _original {
pub fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
let mut index = 0;
let key = value;
let mut key_iter = key.iter();
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
} else {
return;
}
while let Some(c) = key_iter.next() {
buffer[index] = *c;
index += 1;
if *c == b'-' {
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
}
}
}
}
}
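The expected transformation for both variants, as a small check assuming the two modules above are in scope (the `_new` variant writes through a raw pointer, mirroring how the benchmark calls it):

#[test]
fn camel_cases_header_names() {
    // safe-slice variant
    let mut buf = [0u8; 24];
    _original::write_camel_case(b"transfer-encoding", &mut buf);
    assert_eq!(&buf[..b"transfer-encoding".len()], b"Transfer-Encoding");

    // raw-pointer variant
    let mut buf = [0u8; 24];
    let input = b"content-length";
    _new::write_camel_case(input, buf.as_mut_ptr(), input.len());
    assert_eq!(&buf[..input.len()], b"Content-Length");
}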

@@ -0,0 +1,26 @@
use actix_http::HttpService;
use actix_server::Server;
use actix_service::map_config;
use actix_web::{dev::AppConfig, get, App};
#[get("/")]
async fn index() -> &'static str {
"Hello, world. From Actix Web!"
}
#[tokio::main(flavor = "current_thread")]
async fn main() -> std::io::Result<()> {
Server::build()
.bind("hello-world", "127.0.0.1:8080", || {
// construct actix-web app
let app = App::new().service(index);
HttpService::build()
// pass the app to service builder
// map_config is used to map App's configuration to ServiceBuilder
.finish(map_config(app, |_| AppConfig::default()))
.tcp()
})?
.run()
.await
}

@@ -0,0 +1,27 @@
use std::{convert::Infallible, io, time::Duration};
use actix_http::{HttpService, Request, Response, StatusCode};
use actix_server::Server;
use once_cell::sync::Lazy;
static STR: Lazy<String> = Lazy::new(|| "HELLO WORLD ".repeat(20));
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("dispatcher-benchmark", ("127.0.0.1", 8080), || {
HttpService::build()
.client_request_timeout(Duration::from_secs(1))
.finish(|_: Request| async move {
let mut res = Response::build(StatusCode::OK);
Ok::<_, Infallible>(res.body(&**STR))
})
.tcp()
})?
// limiting number of workers so that bench client is not sharing as many resources
.workers(4)
.run()
.await
}

@@ -1,35 +1,36 @@
use std::{env, io};
use std::{io, time::Duration};
use actix_http::{Error, HttpService, Request, Response};
use actix_http::{Error, HttpService, Request, Response, StatusCode};
use actix_server::Server;
use bytes::BytesMut;
use futures_util::StreamExt;
use futures_util::StreamExt as _;
use http::header::HeaderValue;
use log::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "echo=info");
env_logger::init();
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("echo", "127.0.0.1:8080", || {
.bind("echo", ("127.0.0.1", 8080), || {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
.client_request_timeout(Duration::from_secs(1))
.client_disconnect_timeout(Duration::from_secs(1))
// handles HTTP/1.1 and HTTP/2
.finish(|mut req: Request| async move {
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?);
}
info!("request body: {:?}", body);
Ok::<_, Error>(
Response::Ok()
.header("x-head", HeaderValue::from_static("dummy value!"))
.body(body),
)
log::info!("request body: {:?}", body);
let res = Response::build(StatusCode::OK)
.insert_header(("x-head", HeaderValue::from_static("dummy value!")))
.body(body);
Ok::<_, Error>(res)
})
// No TLS
.tcp()
})?
.run()

@@ -1,32 +1,34 @@
use std::{env, io};
use std::io;
use actix_http::http::HeaderValue;
use actix_http::{Error, HttpService, Request, Response};
use actix_server::Server;
use bytes::BytesMut;
use futures_util::StreamExt;
use log::info;
use actix_http::{
body::{BodyStream, MessageBody},
header, Error, HttpMessage, HttpService, Request, Response, StatusCode,
};
async fn handle_request(mut req: Request) -> Result<Response, Error> {
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?)
async fn handle_request(mut req: Request) -> Result<Response<impl MessageBody>, Error> {
let mut res = Response::build(StatusCode::OK);
if let Some(ct) = req.headers().get(header::CONTENT_TYPE) {
res.insert_header((header::CONTENT_TYPE, ct));
}
info!("request body: {:?}", body);
Ok(Response::Ok()
.header("x-head", HeaderValue::from_static("dummy value!"))
.body(body))
// echo request payload stream as (chunked) response body
let res = res.message_body(BodyStream::new(req.payload().take()))?;
Ok(res)
}
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "echo=info");
env_logger::init();
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("echo", "127.0.0.1:8080", || {
HttpService::build().finish(handle_request).tcp()
actix_server::Server::build()
.bind("echo", ("127.0.0.1", 8080), || {
HttpService::build()
// handles HTTP/1.1 only
.h1(handle_request)
// No TLS
.tcp()
})?
.run()
.await

@@ -0,0 +1,25 @@
use std::{convert::Infallible, io};
use actix_http::{HttpService, Request, Response, StatusCode};
use actix_server::Server;
use once_cell::sync::Lazy;
static STR: Lazy<String> = Lazy::new(|| "HELLO WORLD ".repeat(100));
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("h2spec", ("127.0.0.1", 8080), || {
HttpService::build()
.h2(|_: Request| async move {
let mut res = Response::build(StatusCode::OK);
Ok::<_, Infallible>(res.body(&**STR))
})
.tcp()
})?
.workers(4)
.run()
.await
}

@@ -1,26 +1,35 @@
use std::{env, io};
use std::{convert::Infallible, io, time::Duration};
use actix_http::{HttpService, Response};
use actix_http::{
header::HeaderValue, HttpMessage, HttpService, Request, Response, StatusCode,
};
use actix_server::Server;
use futures_util::future;
use http::header::HeaderValue;
use log::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "hello_world=info");
env_logger::init();
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("hello-world", "127.0.0.1:8080", || {
.bind("hello-world", ("127.0.0.1", 8080), || {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
.finish(|_req| {
info!("{:?}", _req);
let mut res = Response::Ok();
res.header("x-head", HeaderValue::from_static("dummy value!"));
future::ok::<_, ()>(res.body("Hello world!"))
.client_request_timeout(Duration::from_secs(1))
.client_disconnect_timeout(Duration::from_secs(1))
.on_connect_ext(|_, ext| {
ext.insert(42u32);
})
.finish(|req: Request| async move {
log::info!("{:?}", req);
let mut res = Response::build(StatusCode::OK);
res.insert_header(("x-head", HeaderValue::from_static("dummy value!")));
let forty_two = req.extensions().get::<u32>().unwrap().to_string();
res.insert_header((
"x-forty-two",
HeaderValue::from_str(&forty_two).unwrap(),
));
Ok::<_, Infallible>(res.body("Hello world!"))
})
.tcp()
})?

@@ -0,0 +1,40 @@
//! Example showing response body (chunked) stream erroring.
//!
//! Test using `nc` or `curl`.
//! ```sh
//! $ curl -vN 127.0.0.1:8080
//! $ echo 'GET / HTTP/1.1\n\n' | nc 127.0.0.1 8080
//! ```
use std::{convert::Infallible, io, time::Duration};
use actix_http::{body::BodyStream, HttpService, Response};
use actix_server::Server;
use async_stream::stream;
use bytes::Bytes;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("streaming-error", ("127.0.0.1", 8080), || {
HttpService::build()
.finish(|req| async move {
log::info!("{:?}", req);
let res = Response::ok();
Ok::<_, Infallible>(res.set_body(BodyStream::new(stream! {
yield Ok(Bytes::from("123"));
yield Ok(Bytes::from("456"));
actix_rt::time::sleep(Duration::from_millis(1000)).await;
yield Err(io::Error::new(io::ErrorKind::Other, ""));
})))
})
.tcp()
})?
.run()
.await
}

actix-http/examples/ws.rs (new file)
@@ -0,0 +1,112 @@
//! Sets up a WebSocket server over TCP and TLS.
//! Sends a heartbeat message every 4 seconds but does not respond to any incoming frames.
extern crate tls_rustls as rustls;
use std::{
io,
pin::Pin,
task::{Context, Poll},
time::Duration,
};
use actix_codec::Encoder;
use actix_http::{body::BodyStream, error::Error, ws, HttpService, Request, Response};
use actix_rt::time::{interval, Interval};
use actix_server::Server;
use bytes::{Bytes, BytesMut};
use bytestring::ByteString;
use futures_core::{ready, Stream};
#[actix_rt::main]
async fn main() -> io::Result<()> {
env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));
Server::build()
.bind("tcp", ("127.0.0.1", 8080), || {
HttpService::build().h1(handler).tcp()
})?
.bind("tls", ("127.0.0.1", 8443), || {
HttpService::build().finish(handler).rustls(tls_config())
})?
.run()
.await
}
async fn handler(req: Request) -> Result<Response<BodyStream<Heartbeat>>, Error> {
log::info!("handshaking");
let mut res = ws::handshake(req.head())?;
// handshake will always fail under HTTP/2
log::info!("responding");
Ok(res.message_body(BodyStream::new(Heartbeat::new(ws::Codec::new())))?)
}
struct Heartbeat {
codec: ws::Codec,
interval: Interval,
}
impl Heartbeat {
fn new(codec: ws::Codec) -> Self {
Self {
codec,
interval: interval(Duration::from_secs(4)),
}
}
}
impl Stream for Heartbeat {
type Item = Result<Bytes, Error>;
fn poll_next(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
log::trace!("poll");
ready!(self.as_mut().interval.poll_tick(cx));
let mut buffer = BytesMut::new();
self.as_mut()
.codec
.encode(
ws::Message::Text(ByteString::from_static("hello world")),
&mut buffer,
)
.unwrap();
Poll::Ready(Some(Ok(buffer.freeze())))
}
}
fn tls_config() -> rustls::ServerConfig {
use std::io::BufReader;
use rustls::{Certificate, PrivateKey};
use rustls_pemfile::{certs, pkcs8_private_keys};
let cert = rcgen::generate_simple_self_signed(vec!["localhost".to_owned()]).unwrap();
let cert_file = cert.serialize_pem().unwrap();
let key_file = cert.serialize_private_key_pem();
let cert_file = &mut BufReader::new(cert_file.as_bytes());
let key_file = &mut BufReader::new(key_file.as_bytes());
let cert_chain = certs(cert_file)
.unwrap()
.into_iter()
.map(Certificate)
.collect();
let mut keys = pkcs8_private_keys(key_file).unwrap();
let mut config = rustls::ServerConfig::builder()
.with_safe_defaults()
.with_no_client_auth()
.with_single_cert(cert_chain, PrivateKey(keys.remove(0)))
.unwrap();
config.alpn_protocols.push(b"http/1.1".to_vec());
config.alpn_protocols.push(b"h2".to_vec());
config
}
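A minimal client sketch for exercising the plain-TCP endpoint of this example, assuming `awc` and `futures-util` are available; the server pushes a text heartbeat roughly every 4 seconds:

use futures_util::StreamExt as _;

#[actix_rt::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let (_res, mut framed) = awc::Client::new()
        .ws("http://127.0.0.1:8080/")
        .connect()
        .await?;

    // print whatever frames the server sends (heartbeats, in this example)
    while let Some(frame) = framed.next().await {
        println!("received: {:?}", frame?);
    }

    Ok(())
}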

@@ -1,5 +0,0 @@
max_width = 89
reorder_imports = true
#wrap_comments = true
#fn_args_density = "Compressed"
#use_small_heuristics = false

@@ -1,723 +0,0 @@
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, mem};
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use futures_util::ready;
use pin_project::pin_project;
use crate::error::Error;
#[derive(Debug, PartialEq, Copy, Clone)]
/// Body size hint
pub enum BodySize {
None,
Empty,
Sized(u64),
Stream,
}
impl BodySize {
pub fn is_eof(&self) -> bool {
matches!(self, BodySize::None | BodySize::Empty | BodySize::Sized(0))
}
}
/// Type that provides this trait can be streamed to a peer.
pub trait MessageBody {
fn size(&self) -> BodySize;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>>;
downcast_get_type_id!();
}
downcast!(MessageBody);
impl MessageBody for () {
fn size(&self) -> BodySize {
BodySize::Empty
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
Poll::Ready(None)
}
}
impl<T: MessageBody + Unpin> MessageBody for Box<T> {
fn size(&self) -> BodySize {
self.as_ref().size()
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
Pin::new(self.get_mut().as_mut()).poll_next(cx)
}
}
#[pin_project(project = ResponseBodyProj)]
pub enum ResponseBody<B> {
Body(#[pin] B),
Other(#[pin] Body),
}
impl ResponseBody<Body> {
pub fn into_body<B>(self) -> ResponseBody<B> {
match self {
ResponseBody::Body(b) => ResponseBody::Other(b),
ResponseBody::Other(b) => ResponseBody::Other(b),
}
}
}
impl<B> ResponseBody<B> {
pub fn take_body(&mut self) -> ResponseBody<B> {
std::mem::replace(self, ResponseBody::Other(Body::None))
}
}
impl<B: MessageBody> ResponseBody<B> {
pub fn as_ref(&self) -> Option<&B> {
if let ResponseBody::Body(ref b) = self {
Some(b)
} else {
None
}
}
}
impl<B: MessageBody> MessageBody for ResponseBody<B> {
fn size(&self) -> BodySize {
match self {
ResponseBody::Body(ref body) => body.size(),
ResponseBody::Other(ref body) => body.size(),
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() {
ResponseBodyProj::Body(body) => body.poll_next(cx),
ResponseBodyProj::Other(body) => body.poll_next(cx),
}
}
}
impl<B: MessageBody> Stream for ResponseBody<B> {
type Item = Result<Bytes, Error>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
match self.project() {
ResponseBodyProj::Body(body) => body.poll_next(cx),
ResponseBodyProj::Other(body) => body.poll_next(cx),
}
}
}
#[pin_project(project = BodyProj)]
/// Represents various types of http message body.
pub enum Body {
/// Empty response. `Content-Length` header is not set.
None,
/// Zero sized response body. `Content-Length` header is set to `0`.
Empty,
/// Specific response body.
Bytes(Bytes),
/// Generic message body.
Message(Box<dyn MessageBody + Unpin>),
}
impl Body {
/// Create body from slice (copy)
pub fn from_slice(s: &[u8]) -> Body {
Body::Bytes(Bytes::copy_from_slice(s))
}
/// Create body from generic message body.
pub fn from_message<B: MessageBody + Unpin + 'static>(body: B) -> Body {
Body::Message(Box::new(body))
}
}
impl MessageBody for Body {
fn size(&self) -> BodySize {
match self {
Body::None => BodySize::None,
Body::Empty => BodySize::Empty,
Body::Bytes(ref bin) => BodySize::Sized(bin.len() as u64),
Body::Message(ref body) => body.size(),
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() {
BodyProj::None => Poll::Ready(None),
BodyProj::Empty => Poll::Ready(None),
BodyProj::Bytes(ref mut bin) => {
let len = bin.len();
if len == 0 {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(bin))))
}
}
BodyProj::Message(ref mut body) => Pin::new(body.as_mut()).poll_next(cx),
}
}
}
impl PartialEq for Body {
fn eq(&self, other: &Body) -> bool {
match *self {
Body::None => matches!(*other, Body::None),
Body::Empty => matches!(*other, Body::Empty),
Body::Bytes(ref b) => match *other {
Body::Bytes(ref b2) => b == b2,
_ => false,
},
Body::Message(_) => false,
}
}
}
impl fmt::Debug for Body {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Body::None => write!(f, "Body::None"),
Body::Empty => write!(f, "Body::Empty"),
Body::Bytes(ref b) => write!(f, "Body::Bytes({:?})", b),
Body::Message(_) => write!(f, "Body::Message(_)"),
}
}
}
impl From<&'static str> for Body {
fn from(s: &'static str) -> Body {
Body::Bytes(Bytes::from_static(s.as_ref()))
}
}
impl From<&'static [u8]> for Body {
fn from(s: &'static [u8]) -> Body {
Body::Bytes(Bytes::from_static(s))
}
}
impl From<Vec<u8>> for Body {
fn from(vec: Vec<u8>) -> Body {
Body::Bytes(Bytes::from(vec))
}
}
impl From<String> for Body {
fn from(s: String) -> Body {
s.into_bytes().into()
}
}
impl<'a> From<&'a String> for Body {
fn from(s: &'a String) -> Body {
Body::Bytes(Bytes::copy_from_slice(AsRef::<[u8]>::as_ref(&s)))
}
}
impl From<Bytes> for Body {
fn from(s: Bytes) -> Body {
Body::Bytes(s)
}
}
impl From<BytesMut> for Body {
fn from(s: BytesMut) -> Body {
Body::Bytes(s.freeze())
}
}
impl From<serde_json::Value> for Body {
fn from(v: serde_json::Value) -> Body {
Body::Bytes(v.to_string().into())
}
}
impl<S> From<SizedStream<S>> for Body
where
S: Stream<Item = Result<Bytes, Error>> + Unpin + 'static,
{
fn from(s: SizedStream<S>) -> Body {
Body::from_message(s)
}
}
impl<S, E> From<BodyStream<S, E>> for Body
where
S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
E: Into<Error> + 'static,
{
fn from(s: BodyStream<S, E>) -> Body {
Body::from_message(s)
}
}
impl MessageBody for Bytes {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()))))
}
}
}
impl MessageBody for BytesMut {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze())))
}
}
}
impl MessageBody for &'static str {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(
mem::take(self.get_mut()).as_ref(),
))))
}
}
}
impl MessageBody for Vec<u8> {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(mem::take(self.get_mut())))))
}
}
}
impl MessageBody for String {
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(
mem::take(self.get_mut()).into_bytes(),
))))
}
}
}
/// Type representing a streaming body.
/// The response does not contain a `content-length` header; an appropriate transfer encoding is used.
#[pin_project]
pub struct BodyStream<S: Unpin, E> {
#[pin]
stream: S,
_t: PhantomData<E>,
}
impl<S, E> BodyStream<S, E>
where
S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>,
{
pub fn new(stream: S) -> Self {
BodyStream {
stream,
_t: PhantomData,
}
}
}
impl<S, E> MessageBody for BodyStream<S, E>
where
S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>,
{
fn size(&self) -> BodySize {
BodySize::Stream
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped to prevent [`BodyStream`]'s transmission from ending
/// on a zero-length chunk; polling instead proceeds until the underlying
/// [`Stream`] ends.
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream = self.project().stream;
loop {
let stream = stream.as_mut();
return Poll::Ready(match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
opt => opt.map(|res| res.map_err(Into::into)),
});
}
}
}
/// Type representing a streaming body. This body implementation should be used
/// if the total size of the stream is known. Data is sent as-is without using transfer encoding.
#[pin_project]
pub struct SizedStream<S: Unpin> {
size: u64,
#[pin]
stream: S,
}
impl<S> SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
pub fn new(size: u64, stream: S) -> Self {
SizedStream { size, stream }
}
}
impl<S> MessageBody for SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
fn size(&self) -> BodySize {
BodySize::Sized(self.size as u64)
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped to prevent [`SizedStream`]'s transmission from ending
/// on a zero-length chunk; polling instead proceeds until the underlying
/// [`Stream`] ends.
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream: Pin<&mut S> = self.project().stream;
loop {
let stream = stream.as_mut();
return Poll::Ready(match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
val => val,
});
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures_util::future::poll_fn;
use futures_util::pin_mut;
use futures_util::stream;
impl Body {
pub(crate) fn get_ref(&self) -> &[u8] {
match *self {
Body::Bytes(ref bin) => &bin,
_ => panic!(),
}
}
}
impl ResponseBody<Body> {
pub(crate) fn get_ref(&self) -> &[u8] {
match *self {
ResponseBody::Body(ref b) => b.get_ref(),
ResponseBody::Other(ref b) => b.get_ref(),
}
}
}
#[actix_rt::test]
async fn test_static_str() {
assert_eq!(Body::from("").size(), BodySize::Sized(0));
assert_eq!(Body::from("test").size(), BodySize::Sized(4));
assert_eq!(Body::from("test").get_ref(), b"test");
assert_eq!("test".size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| Pin::new(&mut "test").poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_static_bytes() {
assert_eq!(Body::from(b"test".as_ref()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b"test".as_ref()).get_ref(), b"test");
assert_eq!(
Body::from_slice(b"test".as_ref()).size(),
BodySize::Sized(4)
);
assert_eq!(Body::from_slice(b"test".as_ref()).get_ref(), b"test");
let sb = Bytes::from(&b"test"[..]);
pin_mut!(sb);
assert_eq!(sb.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| sb.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_vec() {
assert_eq!(Body::from(Vec::from("test")).size(), BodySize::Sized(4));
assert_eq!(Body::from(Vec::from("test")).get_ref(), b"test");
let test_vec = Vec::from("test");
pin_mut!(test_vec);
assert_eq!(test_vec.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| test_vec.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_bytes() {
let b = Bytes::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_bytes_mut() {
let b = BytesMut::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_string() {
let b = "test".to_owned();
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
assert_eq!(Body::from(&b).size(), BodySize::Sized(4));
assert_eq!(Body::from(&b).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_unit() {
assert_eq!(().size(), BodySize::Empty);
assert!(poll_fn(|cx| Pin::new(&mut ()).poll_next(cx))
.await
.is_none());
}
#[actix_rt::test]
async fn test_box() {
let val = Box::new(());
pin_mut!(val);
assert_eq!(val.size(), BodySize::Empty);
assert!(poll_fn(|cx| val.as_mut().poll_next(cx)).await.is_none());
}
#[actix_rt::test]
async fn test_body_eq() {
assert!(
Body::Bytes(Bytes::from_static(b"1"))
== Body::Bytes(Bytes::from_static(b"1"))
);
assert!(Body::Bytes(Bytes::from_static(b"1")) != Body::None);
}
#[actix_rt::test]
async fn test_body_debug() {
assert!(format!("{:?}", Body::None).contains("Body::None"));
assert!(format!("{:?}", Body::Empty).contains("Body::Empty"));
assert!(format!("{:?}", Body::Bytes(Bytes::from_static(b"1"))).contains('1'));
}
#[actix_rt::test]
async fn test_serde_json() {
use serde_json::json;
assert_eq!(
Body::from(serde_json::Value::String("test".into())).size(),
BodySize::Sized(6)
);
assert_eq!(
Body::from(json!({"test-key":"test-value"})).size(),
BodySize::Sized(25)
);
}
mod body_stream {
use super::*;
//use futures::task::noop_waker;
//use futures::stream::once;
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok(Bytes::from(v)) as Result<Bytes, ()>),
));
pin_mut!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
/* Now it does not compile as it should
#[actix_rt::test]
async fn move_pinned_pointer() {
let (sender, receiver) = futures::channel::oneshot::channel();
let mut body_stream = Ok(BodyStream::new(once(async {
let x = Box::new(0i32);
let y = &x;
receiver.await.unwrap();
let _z = **y;
Ok::<_, ()>(Bytes::new())
})));
let waker = noop_waker();
let mut context = Context::from_waker(&waker);
pin_mut!(body_stream);
let _ = body_stream.as_mut().unwrap().poll_next(&mut context);
sender.send(()).unwrap();
let _ = std::mem::replace(&mut body_stream, Err([0; 32])).unwrap().poll_next(&mut context);
}*/
}
mod sized_stream {
use super::*;
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = SizedStream::new(
2,
stream::iter(["1", "", "2"].iter().map(|&v| Ok(Bytes::from(v)))),
);
pin_mut!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
}
#[actix_rt::test]
async fn test_body_casting() {
let mut body = String::from("hello cast");
let resp_body: &mut dyn MessageBody = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push('!');
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
}

View File

@@ -0,0 +1,215 @@
use std::{
error::Error as StdError,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use futures_core::{ready, Stream};
use pin_project_lite::pin_project;
use super::{BodySize, MessageBody};
pin_project! {
/// Streaming response wrapper.
///
/// The response will not contain a `Content-Length` header; an appropriate transfer encoding is used instead.
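///
/// # Examples
/// A minimal usage sketch; the chunk values and the `futures_util` stream helper are illustrative:
/// ```
/// use actix_http::body::BodyStream;
/// use bytes::Bytes;
/// use futures_util::stream;
///
/// // total size is unknown, so chunked transfer encoding will be used
/// let body = BodyStream::new(stream::iter([
///     Ok::<_, std::convert::Infallible>(Bytes::from_static(b"hel")),
///     Ok(Bytes::from_static(b"lo")),
/// ]));
/// ```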
pub struct BodyStream<S> {
#[pin]
stream: S,
}
}
// TODO: from_infallible method
impl<S, E> BodyStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
#[inline]
pub fn new(stream: S) -> Self {
BodyStream { stream }
}
}
impl<S, E> MessageBody for BodyStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
type Error = E;
#[inline]
fn size(&self) -> BodySize {
BodySize::Stream
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped to prevent [`BodyStream`]'s transmission from ending
/// on a zero-length chunk; polling instead proceeds until the underlying
/// [`Stream`] ends.
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
loop {
let stream = self.as_mut().project().stream;
let chunk = match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
opt => opt,
};
return Poll::Ready(chunk);
}
}
}
#[cfg(test)]
mod tests {
use std::{convert::Infallible, time::Duration};
use actix_rt::{
pin,
time::{sleep, Sleep},
};
use actix_utils::future::poll_fn;
use derive_more::{Display, Error};
use futures_core::ready;
use futures_util::{stream, FutureExt as _};
use pin_project_lite::pin_project;
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, crate::Error>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(BodyStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_any!(BodyStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_any!(BodyStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone
assert_not_impl_any!(BodyStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
));
pin!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
#[actix_rt::test]
async fn read_to_bytes() {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
));
assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12")));
}
#[derive(Debug, Display, Error)]
#[display(fmt = "stream error")]
struct StreamErr;
#[actix_rt::test]
async fn stream_immediate_error() {
let body = BodyStream::new(stream::once(async { Err(StreamErr) }));
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
}
#[actix_rt::test]
async fn stream_string_error() {
// `&'static str` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = BodyStream::new(stream::once(async { Err("stringy error") }));
assert!(matches!(to_bytes(body).await, Err("stringy error")));
}
#[actix_rt::test]
async fn stream_boxed_error() {
// `Box<dyn Error>` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = BodyStream::new(stream::once(async {
Err(Box::<dyn StdError>::from("stringy error"))
}));
assert_eq!(
to_bytes(body).await.unwrap_err().to_string(),
"stringy error"
);
}
#[actix_rt::test]
async fn stream_delayed_error() {
let body = BodyStream::new(stream::iter(vec![Ok(Bytes::from("1")), Err(StreamErr)]));
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
pin_project! {
#[derive(Debug)]
#[project = TimeDelayStreamProj]
enum TimeDelayStream {
Start,
Sleep { delay: Pin<Box<Sleep>> },
Done,
}
}
impl Stream for TimeDelayStream {
type Item = Result<Bytes, StreamErr>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
match self.as_mut().get_mut() {
TimeDelayStream::Start => {
let sleep = sleep(Duration::from_millis(1));
self.as_mut().set(TimeDelayStream::Sleep {
delay: Box::pin(sleep),
});
cx.waker().wake_by_ref();
Poll::Pending
}
TimeDelayStream::Sleep { ref mut delay } => {
ready!(delay.poll_unpin(cx));
self.set(TimeDelayStream::Done);
cx.waker().wake_by_ref();
Poll::Pending
}
TimeDelayStream::Done => Poll::Ready(Some(Err(StreamErr))),
}
}
}
let body = BodyStream::new(TimeDelayStream::Start);
assert!(matches!(to_bytes(body).await, Err(StreamErr)));
}
}

View File

@@ -0,0 +1,126 @@
use std::{
error::Error as StdError,
fmt,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use super::{BodySize, MessageBody, MessageBodyMapErr};
use crate::body;
/// A boxed message body with boxed errors.
#[derive(Debug)]
pub struct BoxBody(BoxBodyInner);
enum BoxBodyInner {
None(body::None),
Bytes(Bytes),
Stream(Pin<Box<dyn MessageBody<Error = Box<dyn StdError>>>>),
}
impl fmt::Debug for BoxBodyInner {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::None(arg0) => f.debug_tuple("None").field(arg0).finish(),
Self::Bytes(arg0) => f.debug_tuple("Bytes").field(arg0).finish(),
Self::Stream(_) => f.debug_tuple("Stream").field(&"dyn MessageBody").finish(),
}
}
}
impl BoxBody {
/// Boxes body type, erasing type information.
///
/// If the body type to wrap is unknown or generic, it is better to use [`MessageBody::boxed`] to
/// avoid double boxing.
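///
/// # Examples
/// A minimal sketch comparing the two routes to a boxed body:
/// ```
/// use actix_http::body::{BoxBody, MessageBody as _};
///
/// let boxed = BoxBody::new("hello world");
/// // equivalent, and preferred when the body type is generic:
/// let also_boxed = "hello world".boxed();
/// assert_eq!(boxed.size(), also_boxed.size());
/// ```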
#[inline]
pub fn new<B>(body: B) -> Self
where
B: MessageBody + 'static,
{
match body.size() {
BodySize::None => Self(BoxBodyInner::None(body::None)),
_ => match body.try_into_bytes() {
Ok(bytes) => Self(BoxBodyInner::Bytes(bytes)),
Err(body) => {
let body = MessageBodyMapErr::new(body, Into::into);
Self(BoxBodyInner::Stream(Box::pin(body)))
}
},
}
}
/// Returns a mutable pinned reference to the inner message body type.
#[inline]
pub fn as_pin_mut(&mut self) -> Pin<&mut Self> {
Pin::new(self)
}
}
impl MessageBody for BoxBody {
type Error = Box<dyn StdError>;
#[inline]
fn size(&self) -> BodySize {
match &self.0 {
BoxBodyInner::None(none) => none.size(),
BoxBodyInner::Bytes(bytes) => bytes.size(),
BoxBodyInner::Stream(stream) => stream.size(),
}
}
#[inline]
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match &mut self.0 {
BoxBodyInner::None(body) => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
BoxBodyInner::Bytes(body) => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
BoxBodyInner::Stream(body) => Pin::new(body).poll_next(cx),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self.0 {
BoxBodyInner::None(body) => Ok(body.try_into_bytes().unwrap()),
BoxBodyInner::Bytes(body) => Ok(body.try_into_bytes().unwrap()),
_ => Err(self),
}
}
#[inline]
fn boxed(self) -> BoxBody {
self
}
}
#[cfg(test)]
mod tests {
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(BoxBody: fmt::Debug, MessageBody, Unpin);
assert_not_impl_any!(BoxBody: Send, Sync);
#[actix_rt::test]
async fn nested_boxed_body() {
let body = Bytes::from_static(&[1, 2, 3]);
let boxed_body = BoxBody::new(BoxBody::new(body));
assert_eq!(
to_bytes(boxed_body).await.unwrap(),
Bytes::from(vec![1, 2, 3]),
);
}
}

View File

@@ -0,0 +1,122 @@
use std::{
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use pin_project_lite::pin_project;
use super::{BodySize, BoxBody, MessageBody};
use crate::Error;
pin_project! {
/// An "either" type specialized for body types.
///
/// It is common, in middleware especially, to conditionally return an inner service's unknown/
/// generic body `B` type or return early with a new response. This type's "right" variant
/// defaults to `BoxBody` since error responses are the common case.
///
/// For example, middleware will often have `type Response = ServiceResponse<EitherBody<B>>`.
/// This means that the inner service's response body type maps to the `Left` variant and the
/// middleware's own error responses use the default `Right` variant of `BoxBody`. Of course,
/// there's no reason it couldn't use `EitherBody<B, String>` instead if its alternative
/// responses have a known type.
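///
/// # Examples
/// A short sketch of the idea outside of middleware; the condition and bodies are illustrative:
/// ```
/// use actix_http::body::{BoxBody, EitherBody};
///
/// fn choose_body(ok: bool, inner: String) -> EitherBody<String> {
///     if ok {
///         // the inner service's body type maps to the `Left` variant
///         EitherBody::new(inner)
///     } else {
///         // early/error responses use the default `BoxBody` right variant
///         EitherBody::right(BoxBody::new("error"))
///     }
/// }
///
/// let _body = choose_body(true, "payload".to_owned());
/// ```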
#[project = EitherBodyProj]
#[derive(Debug, Clone)]
pub enum EitherBody<L, R = BoxBody> {
/// A body of type `L`.
Left { #[pin] body: L },
/// A body of type `R`.
Right { #[pin] body: R },
}
}
impl<L> EitherBody<L, BoxBody> {
/// Creates a new `EitherBody` using the left variant, with `BoxBody` as the right variant.
///
/// If the `R` type is to be inferred and is not `BoxBody`, use the
/// [`left`](Self::left) constructor instead.
#[inline]
pub fn new(body: L) -> Self {
Self::Left { body }
}
}
impl<L, R> EitherBody<L, R> {
/// Creates a new `EitherBody` using the left variant.
#[inline]
pub fn left(body: L) -> Self {
Self::Left { body }
}
/// Creates a new `EitherBody` using the right variant.
#[inline]
pub fn right(body: R) -> Self {
Self::Right { body }
}
}
impl<L, R> MessageBody for EitherBody<L, R>
where
L: MessageBody + 'static,
R: MessageBody + 'static,
{
type Error = Error;
#[inline]
fn size(&self) -> BodySize {
match self {
EitherBody::Left { body } => body.size(),
EitherBody::Right { body } => body.size(),
}
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match self.project() {
EitherBodyProj::Left { body } => body
.poll_next(cx)
.map_err(|err| Error::new_body().with_cause(err)),
EitherBodyProj::Right { body } => body
.poll_next(cx)
.map_err(|err| Error::new_body().with_cause(err)),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
match self {
EitherBody::Left { body } => body
.try_into_bytes()
.map_err(|body| EitherBody::Left { body }),
EitherBody::Right { body } => body
.try_into_bytes()
.map_err(|body| EitherBody::Right { body }),
}
}
#[inline]
fn boxed(self) -> BoxBody {
match self {
EitherBody::Left { body } => body.boxed(),
EitherBody::Right { body } => body.boxed(),
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn type_parameter_inference() {
let _body: EitherBody<(), _> = EitherBody::new(());
let _body: EitherBody<_, ()> = EitherBody::left(());
let _body: EitherBody<(), _> = EitherBody::right(());
}
}

View File

@@ -0,0 +1,609 @@
//! [`MessageBody`] trait and foreign implementations.
use std::{
convert::Infallible,
error::Error as StdError,
mem,
pin::Pin,
task::{Context, Poll},
};
use bytes::{Bytes, BytesMut};
use futures_core::ready;
use pin_project_lite::pin_project;
use super::{BodySize, BoxBody};
/// An interface for types that can be used as a response body.
///
/// It is not usually necessary to create custom body types; this trait is already [implemented for
/// a large number of sensible body types](#foreign-impls), including:
/// - Empty body: `()`
/// - Text-based: `String`, `&'static str`, [`ByteString`](https://docs.rs/bytestring/1)
/// - Byte-based: `Bytes`, `BytesMut`, `Vec<u8>`, `&'static [u8]`
/// - Streams: [`BodyStream`](super::BodyStream), [`SizedStream`](super::SizedStream)
///
/// # Examples
/// ```
/// # use std::convert::Infallible;
/// # use std::task::{Poll, Context};
/// # use std::pin::Pin;
/// # use bytes::Bytes;
/// # use actix_http::body::{BodySize, MessageBody};
/// struct Repeat {
/// chunk: String,
/// n_times: usize,
/// }
///
/// impl MessageBody for Repeat {
/// type Error = Infallible;
///
/// fn size(&self) -> BodySize {
/// BodySize::Sized((self.chunk.len() * self.n_times) as u64)
/// }
///
/// fn poll_next(
/// self: Pin<&mut Self>,
/// _cx: &mut Context<'_>,
/// ) -> Poll<Option<Result<Bytes, Self::Error>>> {
/// let payload_string = self.chunk.repeat(self.n_times);
/// let payload_bytes = Bytes::from(payload_string);
/// Poll::Ready(Some(Ok(payload_bytes)))
/// }
/// }
/// ```
pub trait MessageBody {
/// The type of error that will be returned if streaming body fails.
///
/// Since it is not appropriate to generate a response mid-stream, this error is only used
/// internally, for example in logs.
type Error: Into<Box<dyn StdError>>;
/// Body size hint.
///
/// If [`BodySize::None`] is returned, optimizations that skip reading the body are allowed.
fn size(&self) -> BodySize;
/// Attempt to pull out the next chunk of body bytes.
///
/// # Return Value
/// Similar to the `Stream` interface, there are several possible return values, each indicating
/// a distinct state:
/// - `Poll::Pending` means that this body's next chunk is not ready yet. Implementations must
/// ensure that the current task will be notified when the next chunk may be ready.
/// - `Poll::Ready(Some(val))` means that the body has successfully produced a chunk, `val`,
/// and may produce further values on subsequent `poll_next` calls.
/// - `Poll::Ready(None)` means that the body is complete, and `poll_next` should not be
/// invoked again.
///
/// # Panics
/// Once a body is complete (i.e., `poll_next` returned `Ready(None)`), calling its `poll_next`
/// method again may panic, block forever, or cause other kinds of problems; this trait places
/// no requirements on the effects of such a call. However, as the `poll_next` method is not
/// marked `unsafe`, Rust's usual rules apply: calls must never cause UB, regardless of the body's state.
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>>;
/// Try to convert into the complete chunk of body bytes.
///
/// Override this method if the complete body can be trivially extracted. This is useful for
/// optimizations where `poll_next` calls can be avoided.
///
/// Body types with [`BodySize::None`] are allowed to return empty `Bytes`; however, callers of
/// this method are encouraged to check `size` first and return early when possible.
///
/// # Errors
/// The default implementation will error and return the original type back to the caller for
/// further use.
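///
/// # Examples
/// A quick sketch of the happy path for an already-complete body:
/// ```
/// use actix_http::body::MessageBody as _;
/// use bytes::Bytes;
///
/// let body = Bytes::from_static(b"complete");
/// assert_eq!(body.try_into_bytes().unwrap(), Bytes::from_static(b"complete"));
/// ```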
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self>
where
Self: Sized,
{
Err(self)
}
/// Wraps this body into a `BoxBody`.
///
/// No-op when called on a `BoxBody`, meaning there is no risk of double boxing when calling
/// this on a generic `MessageBody`. Prefer this over [`BoxBody::new`] when a boxed body
/// is required.
#[inline]
fn boxed(self) -> BoxBody
where
Self: Sized + 'static,
{
BoxBody::new(self)
}
}
mod foreign_impls {
use super::*;
impl MessageBody for Infallible {
type Error = Infallible;
fn size(&self) -> BodySize {
match *self {}
}
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match *self {}
}
}
impl MessageBody for () {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(0)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Poll::Ready(None)
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::new())
}
}
impl<B> MessageBody for Box<B>
where
B: MessageBody + Unpin + ?Sized,
{
type Error = B::Error;
#[inline]
fn size(&self) -> BodySize {
self.as_ref().size()
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Pin::new(self.get_mut().as_mut()).poll_next(cx)
}
}
impl<B> MessageBody for Pin<Box<B>>
where
B: MessageBody + ?Sized,
{
type Error = B::Error;
#[inline]
fn size(&self) -> BodySize {
self.as_ref().size()
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
self.get_mut().as_mut().poll_next(cx)
}
}
impl MessageBody for &'static [u8] {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(mem::take(self.get_mut())))))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from_static(self))
}
}
impl MessageBody for Bytes {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()))))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(self)
}
}
impl MessageBody for BytesMut {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze())))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(self.freeze())
}
}
impl MessageBody for Vec<u8> {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::take(self.get_mut()).into())))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from(self))
}
}
impl MessageBody for &'static str {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let string = mem::take(self.get_mut());
let bytes = Bytes::from_static(string.as_bytes());
Poll::Ready(Some(Ok(bytes)))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from_static(self.as_bytes()))
}
}
impl MessageBody for String {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
let string = mem::take(self.get_mut());
Poll::Ready(Some(Ok(Bytes::from(string))))
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::from(self))
}
}
impl MessageBody for bytestring::ByteString {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
let string = mem::take(self.get_mut());
Poll::Ready(Some(Ok(string.into_bytes())))
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(self.into_bytes())
}
}
}
pin_project! {
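/// Crate-private body wrapper that maps the inner body's error type using a one-shot closure.
///
/// Used by `BoxBody` to erase arbitrary body error types into `Box<dyn Error>`.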
pub(crate) struct MessageBodyMapErr<B, F> {
#[pin]
body: B,
mapper: Option<F>,
}
}
impl<B, F, E> MessageBodyMapErr<B, F>
where
B: MessageBody,
F: FnOnce(B::Error) -> E,
{
pub(crate) fn new(body: B, mapper: F) -> Self {
Self {
body,
mapper: Some(mapper),
}
}
}
impl<B, F, E> MessageBody for MessageBodyMapErr<B, F>
where
B: MessageBody,
F: FnOnce(B::Error) -> E,
E: Into<Box<dyn StdError>>,
{
type Error = E;
#[inline]
fn size(&self) -> BodySize {
self.body.size()
}
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
let this = self.as_mut().project();
match ready!(this.body.poll_next(cx)) {
Some(Err(err)) => {
let f = self.as_mut().project().mapper.take().unwrap();
let mapped_err = (f)(err);
Poll::Ready(Some(Err(mapped_err)))
}
Some(Ok(val)) => Poll::Ready(Some(Ok(val))),
None => Poll::Ready(None),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
let Self { body, mapper } = self;
body.try_into_bytes().map_err(|body| Self { body, mapper })
}
}
#[cfg(test)]
mod tests {
use actix_rt::pin;
use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut};
use super::*;
use crate::body::{self, EitherBody};
macro_rules! assert_poll_next {
($pin:expr, $exp:expr) => {
assert_eq!(
poll_fn(|cx| $pin.as_mut().poll_next(cx))
.await
.unwrap() // unwrap option
.unwrap(), // unwrap result
$exp
);
};
}
macro_rules! assert_poll_next_none {
($pin:expr) => {
assert!(poll_fn(|cx| $pin.as_mut().poll_next(cx)).await.is_none());
};
}
#[actix_rt::test]
async fn boxing_equivalence() {
assert_eq!(().size(), BodySize::Sized(0));
assert_eq!(().size(), Box::new(()).size());
assert_eq!(().size(), Box::pin(()).size());
let pl = Box::new(());
pin!(pl);
assert_poll_next_none!(pl);
let mut pl = Box::pin(());
assert_poll_next_none!(pl);
}
#[actix_rt::test]
async fn test_unit() {
let pl = ();
assert_eq!(pl.size(), BodySize::Sized(0));
pin!(pl);
assert_poll_next_none!(pl);
}
#[actix_rt::test]
async fn test_static_str() {
assert_eq!("".size(), BodySize::Sized(0));
assert_eq!("test".size(), BodySize::Sized(4));
let pl = "test";
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_static_bytes() {
assert_eq!(b"".as_ref().size(), BodySize::Sized(0));
assert_eq!(b"test".as_ref().size(), BodySize::Sized(4));
let pl = b"test".as_ref();
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_vec() {
assert_eq!(vec![0; 0].size(), BodySize::Sized(0));
assert_eq!(Vec::from("test").size(), BodySize::Sized(4));
let pl = Vec::from("test");
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_bytes() {
assert_eq!(Bytes::new().size(), BodySize::Sized(0));
assert_eq!(Bytes::from_static(b"test").size(), BodySize::Sized(4));
let pl = Bytes::from_static(b"test");
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_bytes_mut() {
assert_eq!(BytesMut::new().size(), BodySize::Sized(0));
assert_eq!(BytesMut::from(b"test".as_ref()).size(), BodySize::Sized(4));
let pl = BytesMut::from("test");
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn test_string() {
assert_eq!(String::new().size(), BodySize::Sized(0));
assert_eq!("test".to_owned().size(), BodySize::Sized(4));
let pl = "test".to_owned();
pin!(pl);
assert_poll_next!(pl, Bytes::from("test"));
}
#[actix_rt::test]
async fn complete_body_combinators() {
let body = Bytes::from_static(b"test");
let body = BoxBody::new(body);
let body = EitherBody::<_, ()>::left(body);
let body = EitherBody::<(), _>::right(body);
// Do not support try_into_bytes:
// let body = Box::new(body);
// let body = Box::pin(body);
assert_eq!(body.try_into_bytes().unwrap(), Bytes::from("test"));
}
#[actix_rt::test]
async fn complete_body_combinators_poll() {
let body = Bytes::from_static(b"test");
let body = BoxBody::new(body);
let body = EitherBody::<_, ()>::left(body);
let body = EitherBody::<(), _>::right(body);
let mut body = body;
assert_eq!(body.size(), BodySize::Sized(4));
assert_poll_next!(Pin::new(&mut body), Bytes::from("test"));
assert_poll_next_none!(Pin::new(&mut body));
}
#[actix_rt::test]
async fn none_body_combinators() {
fn none_body() -> BoxBody {
let body = body::None;
let body = BoxBody::new(body);
let body = EitherBody::<_, ()>::left(body);
let body = EitherBody::<(), _>::right(body);
body.boxed()
}
assert_eq!(none_body().size(), BodySize::None);
assert_eq!(none_body().try_into_bytes().unwrap(), Bytes::new());
assert_poll_next_none!(Pin::new(&mut none_body()));
}
// down-casting used to be done with a method on the `MessageBody` trait;
// this test is kept to demonstrate the equivalent use of the `Any` trait
#[actix_rt::test]
async fn test_body_casting() {
let mut body = String::from("hello cast");
// let mut resp_body: &mut dyn MessageBody<Error = Error> = &mut body;
let resp_body: &mut dyn std::any::Any = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push('!');
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
}

View File

@@ -0,0 +1,25 @@
//! Traits and structures to aid consuming and writing HTTP payloads.
//!
//! "Body" and "payload" are used somewhat interchangeably in this documentation.
// Though the spec kinda reads like "payload" is the possibly-transfer-encoded part of the message
// and the "body" is the intended possibly-decoded version of that.
mod body_stream;
mod boxed;
mod either;
mod message_body;
mod none;
mod size;
mod sized_stream;
mod utils;
pub use self::body_stream::BodyStream;
pub use self::boxed::BoxBody;
pub use self::either::EitherBody;
pub use self::message_body::MessageBody;
pub(crate) use self::message_body::MessageBodyMapErr;
pub use self::none::None;
pub use self::size::BodySize;
pub use self::sized_stream::SizedStream;
pub use self::utils::to_bytes;

View File

@@ -0,0 +1,51 @@
use std::{
convert::Infallible,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use super::{BodySize, MessageBody};
/// Body type for responses that forbid payloads.
///
/// This is distinct from an "empty" response which _would_ contain a `Content-Length` header.
/// For an "empty" body, use `()` or `Bytes::new()`.
///
/// For example, the HTTP spec forbids a payload to be sent with a `204 No Content` response.
/// In this case, the payload (or lack thereof) is implicit from the status code, so a
/// `Content-Length` header is not required.
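///
/// # Examples
/// A minimal sketch of the size hint this body reports:
/// ```
/// use actix_http::body::{self, BodySize, MessageBody as _};
///
/// let body = body::None::new();
/// assert_eq!(body.size(), BodySize::None);
/// ```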
#[derive(Debug, Clone, Copy, Default)]
#[non_exhaustive]
pub struct None;
impl None {
/// Constructs new "none" body.
#[inline]
pub fn new() -> Self {
None
}
}
impl MessageBody for None {
type Error = Infallible;
#[inline]
fn size(&self) -> BodySize {
BodySize::None
}
#[inline]
fn poll_next(
self: Pin<&mut Self>,
_cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
Poll::Ready(Option::None)
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self> {
Ok(Bytes::new())
}
}

View File

@@ -0,0 +1,41 @@
/// Body size hint.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum BodySize {
/// Implicitly empty body.
///
/// Will omit the Content-Length header. Used for responses to certain methods (e.g., `HEAD`) or
/// with particular status codes (e.g., 204 No Content). Consumers that read this as a body size
/// hint are allowed to make optimizations that skip reading or writing the payload.
None,
/// Known size body.
///
/// Will write `Content-Length: N` header.
Sized(u64),
/// Unknown size body.
///
/// Will not write Content-Length header. Can be used with chunked Transfer-Encoding.
Stream,
}
impl BodySize {
/// Equivalent to `BodySize::Sized(0)`.
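///
/// A one-line sanity check of the equivalence:
/// ```
/// # use actix_http::body::BodySize;
/// assert_eq!(BodySize::ZERO, BodySize::Sized(0));
/// ```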
pub const ZERO: Self = Self::Sized(0);
/// Returns true if size hint indicates omitted or empty body.
///
/// Streams will return false because their size cannot be known without reading the stream.
///
/// ```
/// # use actix_http::body::BodySize;
/// assert!(BodySize::None.is_eof());
/// assert!(BodySize::Sized(0).is_eof());
///
/// assert!(!BodySize::Sized(64).is_eof());
/// assert!(!BodySize::Stream.is_eof());
/// ```
pub fn is_eof(&self) -> bool {
matches!(self, BodySize::None | BodySize::Sized(0))
}
}

View File

@@ -0,0 +1,171 @@
use std::{
error::Error as StdError,
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes;
use futures_core::{ready, Stream};
use pin_project_lite::pin_project;
use super::{BodySize, MessageBody};
pin_project! {
/// Known sized streaming response wrapper.
///
/// This body implementation should be used if the total size of the stream is known. Data is
/// sent as-is without using chunked transfer encoding.
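///
/// # Examples
/// A minimal usage sketch; the chunk values and the `futures_util` stream helper are illustrative:
/// ```
/// use actix_http::body::SizedStream;
/// use bytes::Bytes;
/// use futures_util::stream;
///
/// // the declared size (10) should match the total number of bytes the stream yields
/// let body = SizedStream::new(10, stream::iter([
///     Ok::<_, std::convert::Infallible>(Bytes::from_static(b"hello")),
///     Ok(Bytes::from_static(b"world")),
/// ]));
/// ```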
pub struct SizedStream<S> {
size: u64,
#[pin]
stream: S,
}
}
impl<S, E> SizedStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
#[inline]
pub fn new(size: u64, stream: S) -> Self {
SizedStream { size, stream }
}
}
// TODO: from_infallible method
impl<S, E> MessageBody for SizedStream<S>
where
S: Stream<Item = Result<Bytes, E>>,
E: Into<Box<dyn StdError>> + 'static,
{
type Error = E;
#[inline]
fn size(&self) -> BodySize {
BodySize::Sized(self.size as u64)
}
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped to prevent [`SizedStream`]'s transmission from ending
/// on a zero-length chunk; polling instead proceeds until the underlying
/// [`Stream`] ends.
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Self::Error>>> {
loop {
let stream = self.as_mut().project().stream;
let chunk = match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
val => val,
};
return Poll::Ready(chunk);
}
}
}
#[cfg(test)]
mod tests {
use std::convert::Infallible;
use actix_rt::pin;
use actix_utils::future::poll_fn;
use futures_util::stream;
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*;
use crate::body::to_bytes;
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, crate::Error>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, &'static str>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Empty<Result<Bytes, Infallible>>>: MessageBody);
assert_impl_all!(SizedStream<stream::Repeat<Result<Bytes, Infallible>>>: MessageBody);
assert_not_impl_any!(SizedStream<stream::Empty<Bytes>>: MessageBody);
assert_not_impl_any!(SizedStream<stream::Repeat<Bytes>>: MessageBody);
// crate::Error is not Clone
assert_not_impl_any!(SizedStream<stream::Repeat<Result<Bytes, crate::Error>>>: MessageBody);
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = SizedStream::new(
2,
stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
),
);
pin!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
#[actix_rt::test]
async fn read_to_bytes() {
let body = SizedStream::new(
2,
stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok::<_, Infallible>(Bytes::from(v))),
),
);
assert_eq!(to_bytes(body).await.ok(), Some(Bytes::from("12")));
}
#[actix_rt::test]
async fn stream_string_error() {
// `&'static str` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = SizedStream::new(0, stream::once(async { Err("stringy error") }));
assert_eq!(to_bytes(body).await, Ok(Bytes::new()));
let body = SizedStream::new(1, stream::once(async { Err("stringy error") }));
assert!(matches!(to_bytes(body).await, Err("stringy error")));
}
#[actix_rt::test]
async fn stream_boxed_error() {
// `Box<dyn Error>` does not impl `Error`
// but it does impl `Into<Box<dyn Error>>`
let body = SizedStream::new(
0,
stream::once(async { Err(Box::<dyn StdError>::from("stringy error")) }),
);
assert_eq!(to_bytes(body).await.unwrap(), Bytes::new());
let body = SizedStream::new(
1,
stream::once(async { Err(Box::<dyn StdError>::from("stringy error")) }),
);
assert_eq!(
to_bytes(body).await.unwrap_err().to_string(),
"stringy error"
);
}
}

View File

@@ -0,0 +1,77 @@
use std::task::Poll;
use actix_rt::pin;
use actix_utils::future::poll_fn;
use bytes::{Bytes, BytesMut};
use futures_core::ready;
use super::{BodySize, MessageBody};
/// Collects the body produced by a `MessageBody` implementation into `Bytes`.
///
/// Any errors produced by the body stream are returned immediately.
///
/// # Examples
/// ```
/// use actix_http::body::{self, to_bytes};
/// use bytes::Bytes;
///
/// # async fn test_to_bytes() {
/// let body = body::None::new();
/// let bytes = to_bytes(body).await.unwrap();
/// assert!(bytes.is_empty());
///
/// let body = Bytes::from_static(b"123");
/// let bytes = to_bytes(body).await.unwrap();
/// assert_eq!(bytes, b"123"[..]);
/// # }
/// ```
pub async fn to_bytes<B: MessageBody>(body: B) -> Result<Bytes, B::Error> {
let cap = match body.size() {
BodySize::None | BodySize::Sized(0) => return Ok(Bytes::new()),
BodySize::Sized(size) => size as usize,
// good enough first guess for chunk size
BodySize::Stream => 32_768,
};
let mut buf = BytesMut::with_capacity(cap);
pin!(body);
poll_fn(|cx| loop {
let body = body.as_mut();
match ready!(body.poll_next(cx)) {
Some(Ok(bytes)) => buf.extend_from_slice(&*bytes),
None => return Poll::Ready(Ok(())),
Some(Err(err)) => return Poll::Ready(Err(err)),
}
})
.await?;
Ok(buf.freeze())
}
#[cfg(test)]
mod test {
use futures_util::{stream, StreamExt as _};
use super::*;
use crate::{body::BodyStream, Error};
#[actix_rt::test]
async fn test_to_bytes() {
let bytes = to_bytes(()).await.unwrap();
assert!(bytes.is_empty());
let body = Bytes::from_static(b"123");
let bytes = to_bytes(body).await.unwrap();
assert_eq!(bytes, b"123"[..]);
let stream = stream::iter(vec![Bytes::from_static(b"123"), Bytes::from_static(b"abc")])
.map(Ok::<_, Error>);
let body = BodyStream::new(stream);
let bytes = to_bytes(body).await.unwrap();
assert_eq!(bytes, b"123abc"[..]);
}
}

View File

@@ -1,81 +1,73 @@
use std::marker::PhantomData;
use std::rc::Rc;
use std::{fmt, net};
use std::{fmt, marker::PhantomData, net, rc::Rc, time::Duration};
use actix_codec::Framed;
use actix_service::{IntoServiceFactory, Service, ServiceFactory};
use crate::body::MessageBody;
use crate::config::{KeepAlive, ServiceConfig};
use crate::error::Error;
use crate::h1::{Codec, ExpectHandler, H1Service, UpgradeHandler};
use crate::h2::H2Service;
use crate::helpers::{Data, DataFactory};
use crate::request::Request;
use crate::response::Response;
use crate::service::HttpService;
use crate::{ConnectCallback, Extensions};
use crate::{
body::{BoxBody, MessageBody},
h1::{self, ExpectHandler, H1Service, UpgradeHandler},
service::HttpService,
ConnectCallback, Extensions, KeepAlive, Request, Response, ServiceConfig,
};
/// A HTTP service builder
/// An HTTP service builder.
///
/// This type can be used to construct an instance of [`HttpService`] through a
/// builder-like pattern.
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler<T>> {
/// This type can construct an instance of [`HttpService`] through a builder-like pattern.
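///
/// # Examples
/// A minimal sketch of typical usage; the handler and timeout values are illustrative only:
/// ```
/// # use std::time::Duration;
/// # use actix_http::{Error, HttpService, Request, Response};
/// let http_service = HttpService::build()
///     .client_request_timeout(Duration::from_secs(5))
///     .client_disconnect_timeout(Duration::ZERO)
///     // the async closure acts as the request handler
///     .finish(|_req: Request| async { Ok::<_, Error>(Response::ok()) })
///     .tcp();
/// ```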
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> {
keep_alive: KeepAlive,
client_timeout: u64,
client_disconnect: u64,
client_request_timeout: Duration,
client_disconnect_timeout: Duration,
secure: bool,
local_addr: Option<net::SocketAddr>,
expect: X,
upgrade: Option<U>,
// DEPRECATED: in favor of on_connect_ext
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
_t: PhantomData<(T, S)>,
_phantom: PhantomData<S>,
}
impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler<T>>
impl<T, S> Default for HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Response<BoxBody>> + 'static,
S::InitError: fmt::Debug,
<S::Service as Service>::Future: 'static,
<S::Service as Service<Request>>::Future: 'static,
{
/// Create instance of `ServiceConfigBuilder`
pub fn new() -> Self {
fn default() -> Self {
HttpServiceBuilder {
keep_alive: KeepAlive::Timeout(5),
client_timeout: 5000,
client_disconnect: 0,
// ServiceConfig parts (make sure defaults match)
keep_alive: KeepAlive::default(),
client_request_timeout: Duration::from_secs(5),
client_disconnect_timeout: Duration::ZERO,
secure: false,
local_addr: None,
// dispatcher parts
expect: ExpectHandler,
upgrade: None,
on_connect: None,
on_connect_ext: None,
_t: PhantomData,
_phantom: PhantomData,
}
}
}
impl<T, S, X, U> HttpServiceBuilder<T, S, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Response<BoxBody>> + 'static,
S::InitError: fmt::Debug,
<S::Service as Service>::Future: 'static,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
<S::Service as Service<Request>>::Future: 'static,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
<X::Service as Service>::Future: 'static,
U: ServiceFactory<Config = (), Request = (Request, Framed<T, Codec>), Response = ()>,
U: ServiceFactory<(Request, Framed<T, h1::Codec>), Config = (), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
<U::Service as Service>::Future: 'static,
{
/// Set server keep-alive setting.
/// Set connection keep-alive setting.
///
/// By default keep alive is set to a 5 seconds.
/// Applies to HTTP/1.1 keep-alive and HTTP/2 ping-pong.
///
/// By default keep-alive is 5 seconds.
pub fn keep_alive<W: Into<KeepAlive>>(mut self, val: W) -> Self {
self.keep_alive = val.into();
self
@@ -93,33 +85,45 @@ where
self
}
/// Set server client timeout in milliseconds for first request.
/// Set client request timeout (for first request).
///
/// Defines a timeout for reading client request header. If a client does not transmit
/// the entire set headers within this time, the request is terminated with
/// the 408 (Request Time-out) error.
/// Defines a timeout for reading client request header. If the client does not transmit the
/// request head within this duration, the connection is terminated with a `408 Request Timeout`
/// response error.
///
/// To disable timeout set value to 0.
/// A duration of zero disables the timeout.
///
/// By default client timeout is set to 5000 milliseconds.
pub fn client_timeout(mut self, val: u64) -> Self {
self.client_timeout = val;
/// By default, the client timeout is 5 seconds.
pub fn client_request_timeout(mut self, dur: Duration) -> Self {
self.client_request_timeout = dur;
self
}
/// Set server connection disconnect timeout in milliseconds.
#[doc(hidden)]
#[deprecated(since = "3.0.0", note = "Renamed to `client_request_timeout`.")]
pub fn client_timeout(self, dur: Duration) -> Self {
self.client_request_timeout(dur)
}
/// Set client connection disconnect timeout.
///
/// Defines a timeout for the connection disconnect procedure. If the procedure does not complete
/// within this time, the connection is dropped. This timeout affects secure connections.
///
/// To disable timeout set value to 0.
/// A duration of zero disables the timeout.
///
/// By default disconnect timeout is set to 0.
pub fn client_disconnect(mut self, val: u64) -> Self {
self.client_disconnect = val;
/// By default, the disconnect timeout is disabled.
pub fn client_disconnect_timeout(mut self, dur: Duration) -> Self {
self.client_disconnect_timeout = dur;
self
}
#[doc(hidden)]
#[deprecated(since = "3.0.0", note = "Renamed to `client_disconnect_timeout`.")]
pub fn client_disconnect(self, dur: Duration) -> Self {
self.client_disconnect_timeout(dur)
}
/// Provide service for `Expect: 100-Continue` support.
///
/// The service is called with requests that contain an `Expect` header.
@@ -127,23 +131,21 @@ where
/// request will be forwarded to the main service.
pub fn expect<F, X1>(self, expect: F) -> HttpServiceBuilder<T, S, X1, U>
where
F: IntoServiceFactory<X1>,
X1: ServiceFactory<Config = (), Request = Request, Response = Request>,
X1::Error: Into<Error>,
F: IntoServiceFactory<X1, Request>,
X1: ServiceFactory<Request, Config = (), Response = Request>,
X1::Error: Into<Response<BoxBody>>,
X1::InitError: fmt::Debug,
<X1::Service as Service>::Future: 'static,
{
HttpServiceBuilder {
keep_alive: self.keep_alive,
client_timeout: self.client_timeout,
client_disconnect: self.client_disconnect,
client_request_timeout: self.client_request_timeout,
client_disconnect_timeout: self.client_disconnect_timeout,
secure: self.secure,
local_addr: self.local_addr,
expect: expect.into_factory(),
upgrade: self.upgrade,
on_connect: self.on_connect,
on_connect_ext: self.on_connect_ext,
_t: PhantomData,
_phantom: PhantomData,
}
}
@@ -153,44 +155,24 @@ where
/// and this service is called with the original request and framed object.
pub fn upgrade<F, U1>(self, upgrade: F) -> HttpServiceBuilder<T, S, X, U1>
where
F: IntoServiceFactory<U1>,
U1: ServiceFactory<
Config = (),
Request = (Request, Framed<T, Codec>),
Response = (),
>,
F: IntoServiceFactory<U1, (Request, Framed<T, h1::Codec>)>,
U1: ServiceFactory<(Request, Framed<T, h1::Codec>), Config = (), Response = ()>,
U1::Error: fmt::Display,
U1::InitError: fmt::Debug,
<U1::Service as Service>::Future: 'static,
{
HttpServiceBuilder {
keep_alive: self.keep_alive,
client_timeout: self.client_timeout,
client_disconnect: self.client_disconnect,
client_request_timeout: self.client_request_timeout,
client_disconnect_timeout: self.client_disconnect_timeout,
secure: self.secure,
local_addr: self.local_addr,
expect: self.expect,
upgrade: Some(upgrade.into_factory()),
on_connect: self.on_connect,
on_connect_ext: self.on_connect_ext,
_t: PhantomData,
_phantom: PhantomData,
}
}
/// Set on-connect callback.
///
/// Called once per connection. The return value of the call is stored in request extensions.
///
/// *SOFT DEPRECATED*: Prefer the `on_connect_ext` style callback.
pub fn on_connect<F, I>(mut self, f: F) -> Self
where
F: Fn(&T) -> I + 'static,
I: Clone + 'static,
{
self.on_connect = Some(Rc::new(move |io| Box::new(Data(f(io)))));
self
}
/// Sets the callback to be run on connection establishment.
///
/// Has mutable access to a data container that will be merged into request extensions.
@@ -208,15 +190,15 @@ where
pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U>
where
B: MessageBody,
F: IntoServiceFactory<S>,
S::Error: Into<Error>,
F: IntoServiceFactory<S, Request>,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_timeout,
self.client_disconnect,
self.client_request_timeout,
self.client_disconnect_timeout,
self.secure,
self.local_addr,
);
@@ -224,47 +206,46 @@ where
H1Service::with_config(cfg, service.into_factory())
.expect(self.expect)
.upgrade(self.upgrade)
.on_connect(self.on_connect)
.on_connect_ext(self.on_connect_ext)
}
/// Finish service configuration and create an HTTP service for the HTTP/2 protocol.
pub fn h2<F, B>(self, service: F) -> H2Service<T, S, B>
#[cfg(feature = "http2")]
pub fn h2<F, B>(self, service: F) -> crate::h2::H2Service<T, S, B>
where
B: MessageBody + 'static,
F: IntoServiceFactory<S>,
S::Error: Into<Error> + 'static,
F: IntoServiceFactory<S, Request>,
S::Error: Into<Response<BoxBody>> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_timeout,
self.client_disconnect,
self.client_request_timeout,
self.client_disconnect_timeout,
self.secure,
self.local_addr,
);
H2Service::with_config(cfg, service.into_factory())
.on_connect(self.on_connect)
crate::h2::H2Service::with_config(cfg, service.into_factory())
.on_connect_ext(self.on_connect_ext)
}
/// Finish service configuration and create `HttpService` instance.
pub fn finish<F, B>(self, service: F) -> HttpService<T, S, B, X, U>
where
B: MessageBody + 'static,
F: IntoServiceFactory<S>,
S::Error: Into<Error> + 'static,
F: IntoServiceFactory<S, Request>,
S::Error: Into<Response<BoxBody>> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_timeout,
self.client_disconnect,
self.client_request_timeout,
self.client_disconnect_timeout,
self.secure,
self.local_addr,
);
@ -272,7 +253,6 @@ where
HttpService::with_config(cfg, service.into_factory())
.expect(self.expect)
.upgrade(self.upgrade)
.on_connect(self.on_connect)
.on_connect_ext(self.on_connect_ext)
}
}
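
For context, a minimal sketch of how the reworked builder is typically wired up with `actix-server`, using the `Duration`-based timeout setters that match the renamed fields in this diff (method names should be checked against the released actix-http 3.0 API):

```rust
use std::{convert::Infallible, io, time::Duration};

use actix_http::{HttpService, Request, Response};
use actix_server::Server;

#[actix_rt::main]
async fn main() -> io::Result<()> {
    Server::build()
        .bind("hello-world", ("127.0.0.1", 8080), || {
            HttpService::build()
                // renamed from `client_timeout` / `client_disconnect`
                .client_request_timeout(Duration::from_secs(1))
                .client_disconnect_timeout(Duration::from_secs(1))
                .finish(|_req: Request| async move {
                    // plain 200 OK response for every request
                    Ok::<_, Infallible>(Response::ok())
                })
                .tcp()
        })?
        .run()
        .await
}
```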


@ -1,291 +0,0 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io, mem, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use bytes::{Buf, Bytes};
use futures_util::future::{err, Either, FutureExt, LocalBoxFuture, Ready};
use h2::client::SendRequest;
use pin_project::pin_project;
use crate::body::MessageBody;
use crate::h1::ClientCodec;
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::Payload;
use super::error::SendRequestError;
use super::pool::{Acquired, Protocol};
use super::{h1proto, h2proto};
pub(crate) enum ConnectionType<Io> {
H1(Io),
H2(SendRequest<Bytes>),
}
pub trait Connection {
type Io: AsyncRead + AsyncWrite + Unpin;
type Future: Future<Output = Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol;
/// Send request and body
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
self,
head: H,
body: B,
) -> Self::Future;
type TunnelFuture: Future<
Output = Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>;
/// Send request, returns Response and Framed
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture;
}
pub(crate) trait ConnectionLifetime: AsyncRead + AsyncWrite + 'static {
/// Close connection
fn close(self: Pin<&mut Self>);
/// Release connection to the connection pool
fn release(self: Pin<&mut Self>);
}
#[doc(hidden)]
/// HTTP client connection
pub struct IoConnection<T> {
io: Option<ConnectionType<T>>,
created: time::Instant,
pool: Option<Acquired<T>>,
}
impl<T> fmt::Debug for IoConnection<T>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.io {
Some(ConnectionType::H1(ref io)) => write!(f, "H1Connection({:?})", io),
Some(ConnectionType::H2(_)) => write!(f, "H2Connection"),
None => write!(f, "Connection(Empty)"),
}
}
}
impl<T: AsyncRead + AsyncWrite + Unpin> IoConnection<T> {
pub(crate) fn new(
io: ConnectionType<T>,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> Self {
IoConnection {
pool,
created,
io: Some(io),
}
}
pub(crate) fn into_inner(self) -> (ConnectionType<T>, time::Instant) {
(self.io.unwrap(), self.created)
}
}
impl<T> Connection for IoConnection<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Io = T;
type Future =
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol {
match self.io {
Some(ConnectionType::H1(_)) => Protocol::Http1,
Some(ConnectionType::H2(_)) => Protocol::Http2,
None => Protocol::Http1,
}
}
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
mut self,
head: H,
body: B,
) -> Self::Future {
match self.io.take().unwrap() {
ConnectionType::H1(io) => {
h1proto::send_request(io, head.into(), body, self.created, self.pool)
.boxed_local()
}
ConnectionType::H2(io) => {
h2proto::send_request(io, head.into(), body, self.created, self.pool)
.boxed_local()
}
}
}
type TunnelFuture = Either<
LocalBoxFuture<
'static,
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>,
Ready<Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>>,
>;
/// Send request, returns Response and Framed
fn open_tunnel<H: Into<RequestHeadType>>(mut self, head: H) -> Self::TunnelFuture {
match self.io.take().unwrap() {
ConnectionType::H1(io) => {
Either::Left(h1proto::open_tunnel(io, head.into()).boxed_local())
}
ConnectionType::H2(io) => {
if let Some(mut pool) = self.pool.take() {
pool.release(IoConnection::new(
ConnectionType::H2(io),
self.created,
None,
));
}
Either::Right(err(SendRequestError::TunnelNotSupported))
}
}
}
}
#[allow(dead_code)]
pub(crate) enum EitherConnection<A, B> {
A(IoConnection<A>),
B(IoConnection<B>),
}
impl<A, B> Connection for EitherConnection<A, B>
where
A: AsyncRead + AsyncWrite + Unpin + 'static,
B: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Io = EitherIo<A, B>;
type Future =
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol {
match self {
EitherConnection::A(con) => con.protocol(),
EitherConnection::B(con) => con.protocol(),
}
}
fn send_request<RB: MessageBody + 'static, H: Into<RequestHeadType>>(
self,
head: H,
body: RB,
) -> Self::Future {
match self {
EitherConnection::A(con) => con.send_request(head, body),
EitherConnection::B(con) => con.send_request(head, body),
}
}
type TunnelFuture = LocalBoxFuture<
'static,
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>;
/// Send request, returns Response and Framed
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture {
match self {
EitherConnection::A(con) => con
.open_tunnel(head)
.map(|res| {
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::A)))
})
.boxed_local(),
EitherConnection::B(con) => con
.open_tunnel(head)
.map(|res| {
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::B)))
})
.boxed_local(),
}
}
}
#[pin_project(project = EitherIoProj)]
pub enum EitherIo<A, B> {
A(#[pin] A),
B(#[pin] B),
}
impl<A, B> AsyncRead for EitherIo<A, B>
where
A: AsyncRead,
B: AsyncRead,
{
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
match self.project() {
EitherIoProj::A(val) => val.poll_read(cx, buf),
EitherIoProj::B(val) => val.poll_read(cx, buf),
}
}
unsafe fn prepare_uninitialized_buffer(
&self,
buf: &mut [mem::MaybeUninit<u8>],
) -> bool {
match self {
EitherIo::A(ref val) => val.prepare_uninitialized_buffer(buf),
EitherIo::B(ref val) => val.prepare_uninitialized_buffer(buf),
}
}
}
impl<A, B> AsyncWrite for EitherIo<A, B>
where
A: AsyncWrite,
B: AsyncWrite,
{
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
match self.project() {
EitherIoProj::A(val) => val.poll_write(cx, buf),
EitherIoProj::B(val) => val.poll_write(cx, buf),
}
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
match self.project() {
EitherIoProj::A(val) => val.poll_flush(cx),
EitherIoProj::B(val) => val.poll_flush(cx),
}
}
fn poll_shutdown(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
match self.project() {
EitherIoProj::A(val) => val.poll_shutdown(cx),
EitherIoProj::B(val) => val.poll_shutdown(cx),
}
}
fn poll_write_buf<U: Buf>(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut U,
) -> Poll<Result<usize, io::Error>>
where
Self: Sized,
{
match self.project() {
EitherIoProj::A(val) => val.poll_write_buf(cx, buf),
EitherIoProj::B(val) => val.poll_write_buf(cx, buf),
}
}
}
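
The `EitherIo` type above is an instance of a common pin-projection dispatch pattern. Below is a minimal, self-contained sketch of the same idea, written against the tokio 1.x `AsyncRead` signature rather than the older one used in this removed file:

```rust
use std::{io, pin::Pin, task::{Context, Poll}};

use pin_project::pin_project;
use tokio::io::{AsyncRead, ReadBuf};

// Enum wrapper that forwards `AsyncRead` calls to whichever variant it holds;
// `#[pin_project]` generates the projection enum used in the match below.
#[pin_project(project = EitherProj)]
pub enum Either<A, B> {
    Left(#[pin] A),
    Right(#[pin] B),
}

impl<A: AsyncRead, B: AsyncRead> AsyncRead for Either<A, B> {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        match self.project() {
            EitherProj::Left(io) => io.poll_read(cx, buf),
            EitherProj::Right(io) => io.poll_read(cx, buf),
        }
    }
}
```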


@ -1,547 +0,0 @@
use std::fmt;
use std::marker::PhantomData;
use std::time::Duration;
use actix_codec::{AsyncRead, AsyncWrite};
use actix_connect::{
default_connector, Connect as TcpConnect, Connection as TcpConnection,
};
use actix_rt::net::TcpStream;
use actix_service::{apply_fn, Service};
use actix_utils::timeout::{TimeoutError, TimeoutService};
use http::Uri;
use super::config::ConnectorConfig;
use super::connection::Connection;
use super::error::ConnectError;
use super::pool::{ConnectionPool, Protocol};
use super::Connect;
#[cfg(feature = "openssl")]
use actix_connect::ssl::openssl::SslConnector as OpensslConnector;
#[cfg(feature = "rustls")]
use actix_connect::ssl::rustls::ClientConfig;
#[cfg(feature = "rustls")]
use std::sync::Arc;
#[cfg(any(feature = "openssl", feature = "rustls"))]
enum SslConnector {
#[cfg(feature = "openssl")]
Openssl(OpensslConnector),
#[cfg(feature = "rustls")]
Rustls(Arc<ClientConfig>),
}
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
type SslConnector = ();
/// Manages HTTP client network connectivity.
///
/// The `Connector` type uses a builder-like combinator pattern for service
/// construction that finishes by calling the `.finish()` method.
///
/// ```rust,ignore
/// use std::time::Duration;
/// use actix_http::client::Connector;
///
/// let connector = Connector::new()
/// .timeout(Duration::from_secs(5))
/// .finish();
/// ```
pub struct Connector<T, U> {
connector: T,
config: ConnectorConfig,
#[allow(dead_code)]
ssl: SslConnector,
_t: PhantomData<U>,
}
trait Io: AsyncRead + AsyncWrite + Unpin {}
impl<T: AsyncRead + AsyncWrite + Unpin> Io for T {}
impl Connector<(), ()> {
#[allow(clippy::new_ret_no_self, clippy::let_unit_value)]
pub fn new() -> Connector<
impl Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, TcpStream>,
Error = actix_connect::ConnectError,
> + Clone,
TcpStream,
> {
Connector {
ssl: Self::build_ssl(vec![b"h2".to_vec(), b"http/1.1".to_vec()]),
connector: default_connector(),
config: ConnectorConfig::default(),
_t: PhantomData,
}
}
// Build Ssl connector with openssl, based on supplied alpn protocols
#[cfg(feature = "openssl")]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
use actix_connect::ssl::openssl::SslMethod;
use bytes::{BufMut, BytesMut};
let mut alpn = BytesMut::with_capacity(20);
for proto in protocols.iter() {
alpn.put_u8(proto.len() as u8);
alpn.put(proto.as_slice());
}
let mut ssl = OpensslConnector::builder(SslMethod::tls()).unwrap();
let _ = ssl
.set_alpn_protos(&alpn)
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
SslConnector::Openssl(ssl.build())
}
// Build Ssl connector with rustls, based on supplied alpn protocols
#[cfg(all(not(feature = "openssl"), feature = "rustls"))]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
let mut config = ClientConfig::new();
config.set_protocols(&protocols);
config
.root_store
.add_server_trust_anchors(&actix_tls::rustls::TLS_SERVER_ROOTS);
SslConnector::Rustls(Arc::new(config))
}
// ssl turned off, provides empty ssl connector
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
fn build_ssl(_: Vec<Vec<u8>>) -> SslConnector {}
}
impl<T, U> Connector<T, U> {
/// Use custom connector.
pub fn connector<T1, U1>(self, connector: T1) -> Connector<T1, U1>
where
U1: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
T1: Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, U1>,
Error = actix_connect::ConnectError,
> + Clone,
{
Connector {
connector,
config: self.config,
ssl: self.ssl,
_t: PhantomData,
}
}
}
impl<T, U> Connector<T, U>
where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
T: Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, U>,
Error = actix_connect::ConnectError,
> + Clone
+ 'static,
{
/// Connection timeout, i.e. max time to connect to remote host including dns name resolution.
/// Set to 1 second by default.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.config.timeout = timeout;
self
}
#[cfg(feature = "openssl")]
/// Use custom `SslConnector` instance.
pub fn ssl(mut self, connector: OpensslConnector) -> Self {
self.ssl = SslConnector::Openssl(connector);
self
}
#[cfg(feature = "rustls")]
pub fn rustls(mut self, connector: Arc<ClientConfig>) -> Self {
self.ssl = SslConnector::Rustls(connector);
self
}
/// Maximum supported HTTP major version.
/// Supported versions: HTTP/1.1 and HTTP/2.
pub fn max_http_version(mut self, val: http::Version) -> Self {
let versions = match val {
http::Version::HTTP_11 => vec![b"http/1.1".to_vec()],
http::Version::HTTP_2 => vec![b"h2".to_vec(), b"http/1.1".to_vec()],
_ => {
unimplemented!("actix-http:client: supported versions http/1.1, http/2")
}
};
self.ssl = Connector::build_ssl(versions);
self
}
/// Indicates the initial window size (in octets) for
/// HTTP2 stream-level flow control for received data.
///
/// The default value is 65,535 and is good for APIs, but not for big objects.
pub fn initial_window_size(mut self, size: u32) -> Self {
self.config.stream_window_size = size;
self
}
/// Indicates the initial window size (in octets) for
/// HTTP2 connection-level flow control for received data.
///
/// The default value is 65,535 and is good for APIs, but not for big objects.
pub fn initial_connection_window_size(mut self, size: u32) -> Self {
self.config.conn_window_size = size;
self
}
/// Set total number of simultaneous connections per type of scheme.
///
/// If limit is 0, the connector has no limit.
/// The default limit size is 100.
pub fn limit(mut self, limit: usize) -> Self {
self.config.limit = limit;
self
}
/// Set keep-alive period for opened connection.
///
/// Keep-alive period is the period between connection usage. If
/// the delay between repeated usages of the same connection
/// exceeds this period, the connection is closed.
/// Default keep-alive period is 15 seconds.
pub fn conn_keep_alive(mut self, dur: Duration) -> Self {
self.config.conn_keep_alive = dur;
self
}
/// Set max lifetime period for connection.
///
/// Connection lifetime is max lifetime of any opened connection
/// until it is closed regardless of keep-alive period.
/// Default lifetime period is 75 seconds.
pub fn conn_lifetime(mut self, dur: Duration) -> Self {
self.config.conn_lifetime = dur;
self
}
/// Set server connection disconnect timeout in milliseconds.
///
/// Defines a timeout for the connection disconnect procedure. If the procedure does not complete
/// within this time, the socket is dropped. This timeout affects only secure connections.
///
/// To disable timeout set value to 0.
///
/// By default disconnect timeout is set to 3000 milliseconds.
pub fn disconnect_timeout(mut self, dur: Duration) -> Self {
self.config.disconnect_timeout = Some(dur);
self
}
/// Finish configuration process and create connector service.
/// The Connector builder always concludes by calling `finish()` last in
/// its combinator chain.
pub fn finish(
self,
) -> impl Service<Request = Connect, Response = impl Connection, Error = ConnectError>
+ Clone {
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
{
let connector = TimeoutService::new(
self.config.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
connector,
self.config.no_disconnect_timeout(),
),
}
}
#[cfg(any(feature = "openssl", feature = "rustls"))]
{
const H2: &[u8] = b"h2";
#[cfg(feature = "openssl")]
use actix_connect::ssl::openssl::OpensslConnector;
#[cfg(feature = "rustls")]
use actix_connect::ssl::rustls::{RustlsConnector, Session};
use actix_service::{boxed::service, pipeline};
let ssl_service = TimeoutService::new(
self.config.timeout,
pipeline(
apply_fn(self.connector.clone(), |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from),
)
.and_then(match self.ssl {
#[cfg(feature = "openssl")]
SslConnector::Openssl(ssl) => service(
OpensslConnector::service(ssl)
.map(|stream| {
let sock = stream.into_parts().0;
let h2 = sock
.ssl()
.selected_alpn_protocol()
.map(|protos| protos.windows(2).any(|w| w == H2))
.unwrap_or(false);
if h2 {
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
} else {
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
}
})
.map_err(ConnectError::from),
),
#[cfg(feature = "rustls")]
SslConnector::Rustls(ssl) => service(
RustlsConnector::service(ssl)
.map_err(ConnectError::from)
.map(|stream| {
let sock = stream.into_parts().0;
let h2 = sock
.get_ref()
.1
.get_alpn_protocol()
.map(|protos| protos.windows(2).any(|w| w == H2))
.unwrap_or(false);
if h2 {
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
} else {
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
}
}),
),
}),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
let tcp_service = TimeoutService::new(
self.config.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
TimeoutError::Timeout => ConnectError::Timeout,
});
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
tcp_service,
self.config.no_disconnect_timeout(),
),
ssl_pool: ConnectionPool::new(ssl_service, self.config),
}
}
}
}
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
mod connect_impl {
use std::task::{Context, Poll};
use futures_util::future::{err, Either, Ready};
use super::*;
use crate::client::connection::IoConnection;
pub(crate) struct InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
pub(crate) tcp_pool: ConnectionPool<T, Io>,
}
impl<T, Io> Clone for InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
fn clone(&self) -> Self {
InnerConnector {
tcp_pool: self.tcp_pool.clone(),
}
}
}
impl<T, Io> Service for InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = IoConnection<Io>;
type Error = ConnectError;
type Future = Either<
<ConnectionPool<T, Io> as Service>::Future,
Ready<Result<IoConnection<Io>, ConnectError>>,
>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.tcp_pool.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
match req.uri.scheme_str() {
Some("https") | Some("wss") => {
Either::Right(err(ConnectError::SslIsNotSupported))
}
_ => Either::Left(self.tcp_pool.call(req)),
}
}
}
}
#[cfg(any(feature = "openssl", feature = "rustls"))]
mod connect_impl {
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures_core::ready;
use futures_util::future::Either;
use super::*;
use crate::client::connection::EitherConnection;
pub(crate) struct InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>,
{
pub(crate) tcp_pool: ConnectionPool<T1, Io1>,
pub(crate) ssl_pool: ConnectionPool<T2, Io2>,
}
impl<T1, T2, Io1, Io2> Clone for InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
fn clone(&self) -> Self {
InnerConnector {
tcp_pool: self.tcp_pool.clone(),
ssl_pool: self.ssl_pool.clone(),
}
}
}
impl<T1, T2, Io1, Io2> Service for InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = EitherConnection<Io1, Io2>;
type Error = ConnectError;
type Future = Either<
InnerConnectorResponseA<T1, Io1, Io2>,
InnerConnectorResponseB<T2, Io1, Io2>,
>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.tcp_pool.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
match req.uri.scheme_str() {
Some("https") | Some("wss") => Either::Right(InnerConnectorResponseB {
fut: self.ssl_pool.call(req),
_t: PhantomData,
}),
_ => Either::Left(InnerConnectorResponseA {
fut: self.tcp_pool.call(req),
_t: PhantomData,
}),
}
}
}
#[pin_project::pin_project]
pub(crate) struct InnerConnectorResponseA<T, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
{
#[pin]
fut: <ConnectionPool<T, Io1> as Service>::Future,
_t: PhantomData<Io2>,
}
impl<T, Io1, Io2> Future for InnerConnectorResponseA<T, Io1, Io2>
where
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
.map(EitherConnection::A),
)
}
}
#[pin_project::pin_project]
pub(crate) struct InnerConnectorResponseB<T, Io1, Io2>
where
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
#[pin]
fut: <ConnectionPool<T, Io2> as Service>::Future,
_t: PhantomData<Io1>,
}
impl<T, Io1, Io2> Future for InnerConnectorResponseB<T, Io1, Io2>
where
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
.map(EitherConnection::B),
)
}
}
}
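
This connector has since moved into `awc`. As a hedged sketch, configuring the equivalent options on the current client looks roughly like the following (method names assume the awc 3.0 `Connector` and `ClientBuilder` API):

```rust
use std::time::Duration;

use awc::{Client, Connector};

async fn build_client() -> Client {
    let connector = Connector::new()
        .timeout(Duration::from_secs(5))          // connect timeout, incl. DNS resolution
        .conn_keep_alive(Duration::from_secs(15)) // idle keep-alive per pooled connection
        .conn_lifetime(Duration::from_secs(75))   // hard cap on a connection's lifetime
        .limit(100);                              // simultaneous connections per scheme

    Client::builder().connector(connector).finish()
}
```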


@ -1,298 +0,0 @@
use std::io::Write;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{io, mem, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use bytes::buf::BufMutExt;
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use futures_util::future::poll_fn;
use futures_util::{pin_mut, SinkExt, StreamExt};
use crate::error::PayloadError;
use crate::h1;
use crate::header::HeaderMap;
use crate::http::header::{IntoHeaderValue, HOST};
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::{Payload, PayloadStream};
use super::connection::{ConnectionLifetime, ConnectionType, IoConnection};
use super::error::{ConnectError, SendRequestError};
use super::pool::Acquired;
use crate::body::{BodySize, MessageBody};
pub(crate) async fn send_request<T, B>(
io: T,
mut head: RequestHeadType,
body: B,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> Result<(ResponseHead, Payload), SendRequestError>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
B: MessageBody,
{
// set request host header
if !head.as_ref().headers.contains_key(HOST)
&& !head.extra_headers().iter().any(|h| h.contains_key(HOST))
{
if let Some(host) = head.as_ref().uri.host() {
let mut wrt = BytesMut::with_capacity(host.len() + 5).writer();
let _ = match head.as_ref().uri.port_u16() {
None | Some(80) | Some(443) => write!(wrt, "{}", host),
Some(port) => write!(wrt, "{}:{}", host, port),
};
match wrt.get_mut().split().freeze().try_into() {
Ok(value) => match head {
RequestHeadType::Owned(ref mut head) => {
head.headers.insert(HOST, value)
}
RequestHeadType::Rc(_, ref mut extra_headers) => {
let headers = extra_headers.get_or_insert(HeaderMap::new());
headers.insert(HOST, value)
}
},
Err(e) => log::error!("Can not set HOST header {}", e),
}
}
}
let io = H1Connection {
created,
pool,
io: Some(io),
};
// create Framed and send request
let mut framed_inner = Framed::new(io, h1::ClientCodec::default());
framed_inner.send((head, body.size()).into()).await?;
// send request body
match body.size() {
BodySize::None | BodySize::Empty | BodySize::Sized(0) => (),
_ => send_body(body, Pin::new(&mut framed_inner)).await?,
};
// read response and init read body
let res = Pin::new(&mut framed_inner).into_future().await;
let (head, framed) = if let (Some(result), framed) = res {
let item = result.map_err(SendRequestError::from)?;
(item, framed)
} else {
return Err(SendRequestError::from(ConnectError::Disconnected));
};
match framed.codec_ref().message_type() {
h1::MessageType::None => {
let force_close = !framed.codec_ref().keepalive();
release_connection(framed, force_close);
Ok((head, Payload::None))
}
_ => {
let pl: PayloadStream = PlStream::new(framed_inner).boxed_local();
Ok((head, pl.into()))
}
}
}
pub(crate) async fn open_tunnel<T>(
io: T,
head: RequestHeadType,
) -> Result<(ResponseHead, Framed<T, h1::ClientCodec>), SendRequestError>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
// create Framed and send request
let mut framed = Framed::new(io, h1::ClientCodec::default());
framed.send((head, BodySize::None).into()).await?;
// read response
if let (Some(result), framed) = framed.into_future().await {
let head = result.map_err(SendRequestError::from)?;
Ok((head, framed))
} else {
Err(SendRequestError::from(ConnectError::Disconnected))
}
}
/// send request body to the peer
pub(crate) async fn send_body<T, B>(
body: B,
mut framed: Pin<&mut Framed<T, h1::ClientCodec>>,
) -> Result<(), SendRequestError>
where
T: ConnectionLifetime + Unpin,
B: MessageBody,
{
pin_mut!(body);
let mut eof = false;
while !eof {
while !eof && !framed.as_ref().is_write_buf_full() {
match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
Some(result) => {
framed.as_mut().write(h1::Message::Chunk(Some(result?)))?;
}
None => {
eof = true;
framed.as_mut().write(h1::Message::Chunk(None))?;
}
}
}
if !framed.as_ref().is_write_buf_empty() {
poll_fn(|cx| match framed.as_mut().flush(cx) {
Poll::Ready(Ok(_)) => Poll::Ready(Ok(())),
Poll::Ready(Err(err)) => Poll::Ready(Err(err)),
Poll::Pending => {
if !framed.as_ref().is_write_buf_full() {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
})
.await?;
}
}
SinkExt::flush(Pin::into_inner(framed)).await?;
Ok(())
}
#[doc(hidden)]
/// HTTP client connection
pub struct H1Connection<T> {
/// T should be `Unpin`
io: Option<T>,
created: time::Instant,
pool: Option<Acquired<T>>,
}
impl<T> ConnectionLifetime for H1Connection<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
/// Close connection
fn close(mut self: Pin<&mut Self>) {
if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() {
pool.close(IoConnection::new(
ConnectionType::H1(io),
self.created,
None,
));
}
}
}
/// Release this connection to the connection pool
fn release(mut self: Pin<&mut Self>) {
if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() {
pool.release(IoConnection::new(
ConnectionType::H1(io),
self.created,
None,
));
}
}
}
}
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncRead for H1Connection<T> {
unsafe fn prepare_uninitialized_buffer(
&self,
buf: &mut [mem::MaybeUninit<u8>],
) -> bool {
self.io.as_ref().unwrap().prepare_uninitialized_buffer(buf)
}
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io.as_mut().unwrap()).poll_read(cx, buf)
}
}
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncWrite for H1Connection<T> {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io.as_mut().unwrap()).poll_write(cx, buf)
}
fn poll_flush(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
Pin::new(self.io.as_mut().unwrap()).poll_flush(cx)
}
fn poll_shutdown(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), io::Error>> {
Pin::new(self.io.as_mut().unwrap()).poll_shutdown(cx)
}
}
#[pin_project::pin_project]
pub(crate) struct PlStream<Io> {
#[pin]
framed: Option<Framed<Io, h1::ClientPayloadCodec>>,
}
impl<Io: ConnectionLifetime> PlStream<Io> {
fn new(framed: Framed<Io, h1::ClientCodec>) -> Self {
let framed = framed.into_map_codec(|codec| codec.into_payload_codec());
PlStream {
framed: Some(framed),
}
}
}
impl<Io: ConnectionLifetime> Stream for PlStream<Io> {
type Item = Result<Bytes, PayloadError>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
let mut this = self.project();
match this.framed.as_mut().as_pin_mut().unwrap().next_item(cx)? {
Poll::Pending => Poll::Pending,
Poll::Ready(Some(chunk)) => {
if let Some(chunk) = chunk {
Poll::Ready(Some(Ok(chunk)))
} else {
let framed = this.framed.as_mut().as_pin_mut().unwrap();
let force_close = !framed.codec_ref().keepalive();
release_connection(framed, force_close);
Poll::Ready(None)
}
}
Poll::Ready(None) => Poll::Ready(None),
}
}
}
fn release_connection<T, U>(framed: Pin<&mut Framed<T, U>>, force_close: bool)
where
T: ConnectionLifetime,
{
if !force_close && framed.is_read_buf_empty() && framed.is_write_buf_empty() {
framed.io_pin().release()
} else {
framed.io_pin().close()
}
}
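
A small, self-contained illustration of the Host header rule in `send_request` above: default ports are omitted and any other port is appended to the host.

```rust
use http::Uri;

// Mirrors the Host header logic above: ports 80/443 (and no port) yield the
// bare host; any other port is written as `host:port`.
fn host_header_value(uri: &Uri) -> Option<String> {
    let host = uri.host()?;
    Some(match uri.port_u16() {
        None | Some(80) | Some(443) => host.to_owned(),
        Some(port) => format!("{}:{}", host, port),
    })
}

// e.g. "http://example.com:8080/index" yields "example.com:8080"
```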


@ -1,21 +0,0 @@
//! HTTP client API.
use http::Uri;
mod config;
mod connection;
mod connector;
mod error;
mod h1proto;
mod h2proto;
mod pool;
pub use self::connection::Connection;
pub use self::connector::Connector;
pub use self::error::{ConnectError, FreezeRequestError, InvalidUrl, SendRequestError};
pub use self::pool::Protocol;
#[derive(Clone)]
pub struct Connect {
pub uri: Uri,
pub addr: Option<std::net::SocketAddr>,
}


@ -1,645 +0,0 @@
use std::cell::RefCell;
use std::collections::VecDeque;
use std::future::Future;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::{delay_for, Delay};
use actix_service::Service;
use actix_utils::task::LocalWaker;
use bytes::Bytes;
use futures_channel::oneshot;
use futures_util::future::{poll_fn, FutureExt, LocalBoxFuture};
use fxhash::FxHashMap;
use h2::client::{Connection, SendRequest};
use http::uri::Authority;
use indexmap::IndexSet;
use pin_project::pin_project;
use slab::Slab;
use super::config::ConnectorConfig;
use super::connection::{ConnectionType, IoConnection};
use super::error::ConnectError;
use super::h2proto::handshake;
use super::Connect;
#[derive(Clone, Copy, PartialEq)]
/// Protocol version
pub enum Protocol {
Http1,
Http2,
}
#[derive(Hash, Eq, PartialEq, Clone, Debug)]
pub(crate) struct Key {
authority: Authority,
}
impl From<Authority> for Key {
fn from(authority: Authority) -> Key {
Key { authority }
}
}
/// Connections pool
pub(crate) struct ConnectionPool<T, Io: 'static>(Rc<RefCell<T>>, Rc<RefCell<Inner<Io>>>);
impl<T, Io> ConnectionPool<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
pub(crate) fn new(connector: T, config: ConnectorConfig) -> Self {
let connector_rc = Rc::new(RefCell::new(connector));
let inner_rc = Rc::new(RefCell::new(Inner {
config,
acquired: 0,
waiters: Slab::new(),
waiters_queue: IndexSet::new(),
available: FxHashMap::default(),
waker: LocalWaker::new(),
}));
// start support future
actix_rt::spawn(ConnectorPoolSupport {
connector: Rc::clone(&connector_rc),
inner: Rc::clone(&inner_rc),
});
ConnectionPool(connector_rc, inner_rc)
}
}
impl<T, Io> Clone for ConnectionPool<T, Io>
where
Io: 'static,
{
fn clone(&self) -> Self {
ConnectionPool(self.0.clone(), self.1.clone())
}
}
impl<T, Io> Drop for ConnectionPool<T, Io> {
fn drop(&mut self) {
// wake up the ConnectorPoolSupport when dropping so it can exit properly.
self.1.borrow().waker.wake();
}
}
impl<T, Io> Service for ConnectionPool<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = IoConnection<Io>;
type Error = ConnectError;
type Future = LocalBoxFuture<'static, Result<IoConnection<Io>, ConnectError>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
let mut connector = self.0.clone();
let inner = self.1.clone();
let fut = async move {
let key = if let Some(authority) = req.uri.authority() {
authority.clone().into()
} else {
return Err(ConnectError::Unresolved);
};
// acquire connection
match poll_fn(|cx| Poll::Ready(inner.borrow_mut().acquire(&key, cx))).await {
Acquire::Acquired(io, created) => {
// use existing connection
Ok(IoConnection::new(
io,
created,
Some(Acquired(key, Some(inner))),
))
}
Acquire::Available => {
// open tcp connection
let (io, proto) = connector.call(req).await?;
let config = inner.borrow().config.clone();
let guard = OpenGuard::new(key, inner);
if proto == Protocol::Http1 {
Ok(IoConnection::new(
ConnectionType::H1(io),
Instant::now(),
Some(guard.consume()),
))
} else {
let (snd, connection) = handshake(io, &config).await?;
actix_rt::spawn(connection.map(|_| ()));
Ok(IoConnection::new(
ConnectionType::H2(snd),
Instant::now(),
Some(guard.consume()),
))
}
}
_ => {
// connection is not available, wait
let (rx, token) = inner.borrow_mut().wait_for(req);
let guard = WaiterGuard::new(key, token, inner);
let res = match rx.await {
Err(_) => Err(ConnectError::Disconnected),
Ok(res) => res,
};
guard.consume();
res
}
}
};
fut.boxed_local()
}
}
struct WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
key: Key,
token: usize,
inner: Option<Rc<RefCell<Inner<Io>>>>,
}
impl<Io> WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn new(key: Key, token: usize, inner: Rc<RefCell<Inner<Io>>>) -> Self {
Self {
key,
token,
inner: Some(inner),
}
}
fn consume(mut self) {
let _ = self.inner.take();
}
}
impl<Io> Drop for WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(&mut self) {
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release_waiter(&self.key, self.token);
inner.check_availability();
}
}
}
struct OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
key: Key,
inner: Option<Rc<RefCell<Inner<Io>>>>,
}
impl<Io> OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn new(key: Key, inner: Rc<RefCell<Inner<Io>>>) -> Self {
Self {
key,
inner: Some(inner),
}
}
fn consume(mut self) -> Acquired<Io> {
Acquired(self.key.clone(), self.inner.take())
}
}
impl<Io> Drop for OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(&mut self) {
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release();
inner.check_availability();
}
}
}
enum Acquire<T> {
Acquired(ConnectionType<T>, Instant),
Available,
NotAvailable,
}
struct AvailableConnection<Io> {
io: ConnectionType<Io>,
used: Instant,
created: Instant,
}
pub(crate) struct Inner<Io> {
config: ConnectorConfig,
acquired: usize,
available: FxHashMap<Key, VecDeque<AvailableConnection<Io>>>,
waiters: Slab<
Option<(
Connect,
oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
)>,
>,
waiters_queue: IndexSet<(Key, usize)>,
waker: LocalWaker,
}
impl<Io> Inner<Io> {
fn reserve(&mut self) {
self.acquired += 1;
}
fn release(&mut self) {
self.acquired -= 1;
}
fn release_waiter(&mut self, key: &Key, token: usize) {
self.waiters.remove(token);
let _ = self.waiters_queue.shift_remove(&(key.clone(), token));
}
}
impl<Io> Inner<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
/// connection is not available, wait
fn wait_for(
&mut self,
connect: Connect,
) -> (
oneshot::Receiver<Result<IoConnection<Io>, ConnectError>>,
usize,
) {
let (tx, rx) = oneshot::channel();
let key: Key = connect.uri.authority().unwrap().clone().into();
let entry = self.waiters.vacant_entry();
let token = entry.key();
entry.insert(Some((connect, tx)));
assert!(self.waiters_queue.insert((key, token)));
(rx, token)
}
fn acquire(&mut self, key: &Key, cx: &mut Context<'_>) -> Acquire<Io> {
// check limits
if self.config.limit > 0 && self.acquired >= self.config.limit {
return Acquire::NotAvailable;
}
self.reserve();
// check if open connection is available
// cleanup stale connections at the same time
if let Some(ref mut connections) = self.available.get_mut(key) {
let now = Instant::now();
while let Some(conn) = connections.pop_back() {
// check if it is still usable
if (now - conn.used) > self.config.conn_keep_alive
|| (now - conn.created) > self.config.conn_lifetime
{
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = conn.io {
actix_rt::spawn(CloseConnection::new(io, timeout))
}
}
} else {
let mut io = conn.io;
let mut buf = [0; 2];
if let ConnectionType::H1(ref mut s) = io {
match Pin::new(s).poll_read(cx, &mut buf) {
Poll::Pending => (),
Poll::Ready(Ok(n)) if n > 0 => {
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = io {
actix_rt::spawn(CloseConnection::new(
io, timeout,
))
}
}
continue;
}
_ => continue,
}
}
return Acquire::Acquired(io, conn.created);
}
}
}
Acquire::Available
}
fn release_conn(&mut self, key: &Key, io: ConnectionType<Io>, created: Instant) {
self.acquired -= 1;
self.available
.entry(key.clone())
.or_insert_with(VecDeque::new)
.push_back(AvailableConnection {
io,
created,
used: Instant::now(),
});
self.check_availability();
}
fn release_close(&mut self, io: ConnectionType<Io>) {
self.acquired -= 1;
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = io {
actix_rt::spawn(CloseConnection::new(io, timeout))
}
}
self.check_availability();
}
fn check_availability(&self) {
if !self.waiters_queue.is_empty() && self.acquired < self.config.limit {
self.waker.wake();
}
}
}
struct CloseConnection<T> {
io: T,
timeout: Delay,
}
impl<T> CloseConnection<T>
where
T: AsyncWrite + Unpin,
{
fn new(io: T, timeout: Duration) -> Self {
CloseConnection {
io,
timeout: delay_for(timeout),
}
}
}
impl<T> Future for CloseConnection<T>
where
T: AsyncWrite + Unpin,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
let this = self.get_mut();
match Pin::new(&mut this.timeout).poll(cx) {
Poll::Ready(_) => Poll::Ready(()),
Poll::Pending => match Pin::new(&mut this.io).poll_shutdown(cx) {
Poll::Ready(_) => Poll::Ready(()),
Poll::Pending => Poll::Pending,
},
}
}
}
#[pin_project]
struct ConnectorPoolSupport<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
connector: T,
inner: Rc<RefCell<Inner<Io>>>,
}
impl<T, Io> Future for ConnectorPoolSupport<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>,
T::Future: 'static,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if Rc::strong_count(this.inner) == 1 {
// If we are last copy of Inner<Io> it means the ConnectionPool is already gone
// and we are safe to exit.
return Poll::Ready(());
}
let mut inner = this.inner.borrow_mut();
inner.waker.register(cx.waker());
// check waiters
loop {
let (key, token) = {
if let Some((key, token)) = inner.waiters_queue.get_index(0) {
(key.clone(), *token)
} else {
break;
}
};
if inner.waiters.get(token).unwrap().is_none() {
continue;
}
match inner.acquire(&key, cx) {
Acquire::NotAvailable => break,
Acquire::Acquired(io, created) => {
let tx = inner.waiters.get_mut(token).unwrap().take().unwrap().1;
if let Err(conn) = tx.send(Ok(IoConnection::new(
io,
created,
Some(Acquired(key.clone(), Some(this.inner.clone()))),
))) {
let (io, created) = conn.unwrap().into_inner();
inner.release_conn(&key, io, created);
}
}
Acquire::Available => {
let (connect, tx) =
inner.waiters.get_mut(token).unwrap().take().unwrap();
OpenWaitingConnection::spawn(
key.clone(),
tx,
this.inner.clone(),
this.connector.call(connect),
inner.config.clone(),
);
}
}
let _ = inner.waiters_queue.swap_remove_index(0);
}
Poll::Pending
}
}
#[pin_project::pin_project(PinnedDrop)]
struct OpenWaitingConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
#[pin]
fut: F,
key: Key,
h2: Option<
LocalBoxFuture<
'static,
Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>,
>,
>,
rx: Option<oneshot::Sender<Result<IoConnection<Io>, ConnectError>>>,
inner: Option<Rc<RefCell<Inner<Io>>>>,
config: ConnectorConfig,
}
impl<F, Io> OpenWaitingConnection<F, Io>
where
F: Future<Output = Result<(Io, Protocol), ConnectError>> + 'static,
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn spawn(
key: Key,
rx: oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
inner: Rc<RefCell<Inner<Io>>>,
fut: F,
config: ConnectorConfig,
) {
actix_rt::spawn(OpenWaitingConnection {
key,
fut,
h2: None,
rx: Some(rx),
inner: Some(inner),
config,
})
}
}
#[pin_project::pinned_drop]
impl<F, Io> PinnedDrop for OpenWaitingConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(self: Pin<&mut Self>) {
if let Some(inner) = self.project().inner.take() {
let mut inner = inner.as_ref().borrow_mut();
inner.release();
inner.check_availability();
}
}
}
impl<F, Io> Future for OpenWaitingConnection<F, Io>
where
F: Future<Output = Result<(Io, Protocol), ConnectError>>,
Io: AsyncRead + AsyncWrite + Unpin,
{
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project();
if let Some(ref mut h2) = this.h2 {
return match Pin::new(h2).poll(cx) {
Poll::Ready(Ok((snd, connection))) => {
actix_rt::spawn(connection.map(|_| ()));
let rx = this.rx.take().unwrap();
let _ = rx.send(Ok(IoConnection::new(
ConnectionType::H2(snd),
Instant::now(),
Some(Acquired(this.key.clone(), this.inner.take())),
)));
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
Poll::Ready(Err(err)) => {
let _ = this.inner.take();
if let Some(rx) = this.rx.take() {
let _ = rx.send(Err(ConnectError::H2(err)));
}
Poll::Ready(())
}
};
}
match this.fut.poll(cx) {
Poll::Ready(Err(err)) => {
let _ = this.inner.take();
if let Some(rx) = this.rx.take() {
let _ = rx.send(Err(err));
}
Poll::Ready(())
}
Poll::Ready(Ok((io, proto))) => {
if proto == Protocol::Http1 {
let rx = this.rx.take().unwrap();
let _ = rx.send(Ok(IoConnection::new(
ConnectionType::H1(io),
Instant::now(),
Some(Acquired(this.key.clone(), this.inner.take())),
)));
Poll::Ready(())
} else {
*this.h2 = Some(handshake(io, this.config).boxed_local());
self.poll(cx)
}
}
Poll::Pending => Poll::Pending,
}
}
}
pub(crate) struct Acquired<T>(Key, Option<Rc<RefCell<Inner<T>>>>);
impl<T> Acquired<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
pub(crate) fn close(&mut self, conn: IoConnection<T>) {
if let Some(inner) = self.1.take() {
let (io, _) = conn.into_inner();
inner.as_ref().borrow_mut().release_close(io);
}
}
pub(crate) fn release(&mut self, conn: IoConnection<T>) {
if let Some(inner) = self.1.take() {
let (io, created) = conn.into_inner();
inner
.as_ref()
.borrow_mut()
.release_conn(&self.0, io, created);
}
}
}
impl<T> Drop for Acquired<T> {
fn drop(&mut self) {
if let Some(inner) = self.1.take() {
inner.as_ref().borrow_mut().release();
}
}
}
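
To summarize the reuse policy in `Inner::acquire` above: a pooled connection is only handed back out while it is inside both the idle keep-alive window and the total lifetime window. A simplified sketch of that staleness check:

```rust
use std::time::{Duration, Instant};

struct AvailableConn {
    created: Instant, // when the connection was first opened
    used: Instant,    // when it was last returned to the pool
}

// Simplified version of the check: both limits must hold, otherwise the
// connection is discarded (and, for H1, shut down under a disconnect timeout).
fn is_reusable(conn: &AvailableConn, keep_alive: Duration, lifetime: Duration) -> bool {
    let now = Instant::now();
    now.duration_since(conn.used) <= keep_alive && now.duration_since(conn.created) <= lifetime
}
```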


@ -1,40 +0,0 @@
use std::cell::RefCell;
use std::rc::Rc;
use std::task::{Context, Poll};
use actix_service::Service;
/// Service that allows turning a non-`Clone` service into a service with a `Clone` impl.
///
/// # Panics
/// `CloneableService` might panic with some creative use of thread-local storage.
/// See https://github.com/actix/actix-web/issues/1295 for an example.
#[doc(hidden)]
pub(crate) struct CloneableService<T: Service>(Rc<RefCell<T>>);
impl<T: Service> CloneableService<T> {
pub(crate) fn new(service: T) -> Self {
Self(Rc::new(RefCell::new(service)))
}
}
impl<T: Service> Clone for CloneableService<T> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl<T: Service> Service for CloneableService<T> {
type Request = T::Request;
type Response = T::Response;
type Error = T::Error;
type Future = T::Future;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.borrow_mut().poll_ready(cx)
}
fn call(&mut self, req: T::Request) -> Self::Future {
self.0.borrow_mut().call(req)
}
}
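
The underlying pattern is simply sharing a single non-`Clone` value behind `Rc<RefCell<_>>` so that cheap clones all borrow the same state. A generic sketch (single-threaded only, and subject to the panic caveat noted above):

```rust
use std::{cell::RefCell, rc::Rc};

struct Shared<T>(Rc<RefCell<T>>);

impl<T> Shared<T> {
    fn new(value: T) -> Self {
        Shared(Rc::new(RefCell::new(value)))
    }

    // all clones observe the same inner value; overlapping borrows would panic
    fn with<R>(&self, f: impl FnOnce(&mut T) -> R) -> R {
        f(&mut self.0.borrow_mut())
    }
}

impl<T> Clone for Shared<T> {
    fn clone(&self) -> Self {
        Shared(Rc::clone(&self.0))
    }
}
```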


@ -1,66 +1,36 @@
use std::cell::Cell;
use std::fmt::Write;
use std::rc::Rc;
use std::time::Duration;
use std::{fmt, net};
use std::{
net,
rc::Rc,
time::{Duration, Instant},
};
use actix_rt::time::{delay_for, delay_until, Delay, Instant};
use bytes::BytesMut;
use futures_util::{future, FutureExt};
use time::OffsetDateTime;
// "Sun, 06 Nov 1994 08:49:37 GMT".len()
const DATE_VALUE_LENGTH: usize = 29;
use crate::{date::DateService, KeepAlive};
#[derive(Debug, PartialEq, Clone, Copy)]
/// Server keep-alive setting
pub enum KeepAlive {
/// Keep alive in seconds
Timeout(usize),
/// Rely on OS to shutdown tcp connection
Os,
/// Disabled
Disabled,
}
impl From<usize> for KeepAlive {
fn from(keepalive: usize) -> Self {
KeepAlive::Timeout(keepalive)
}
}
impl From<Option<usize>> for KeepAlive {
fn from(keepalive: Option<usize>) -> Self {
if let Some(keepalive) = keepalive {
KeepAlive::Timeout(keepalive)
} else {
KeepAlive::Disabled
}
}
}
/// Http service configuration
/// HTTP service configuration.
#[derive(Debug, Clone)]
pub struct ServiceConfig(Rc<Inner>);
#[derive(Debug)]
struct Inner {
keep_alive: Option<Duration>,
client_timeout: u64,
client_disconnect: u64,
ka_enabled: bool,
keep_alive: KeepAlive,
client_request_timeout: Duration,
client_disconnect_timeout: Duration,
secure: bool,
local_addr: Option<std::net::SocketAddr>,
timer: DateService,
}
impl Clone for ServiceConfig {
fn clone(&self) -> Self {
ServiceConfig(self.0.clone())
}
date_service: DateService,
}
impl Default for ServiceConfig {
fn default() -> Self {
Self::new(KeepAlive::Timeout(5), 0, 0, false, None)
Self::new(
KeepAlive::default(),
Duration::from_secs(5),
Duration::ZERO,
false,
None,
)
}
}
@ -68,234 +38,169 @@ impl ServiceConfig {
/// Create instance of `ServiceConfig`
pub fn new(
keep_alive: KeepAlive,
client_timeout: u64,
client_disconnect: u64,
client_request_timeout: Duration,
client_disconnect_timeout: Duration,
secure: bool,
local_addr: Option<net::SocketAddr>,
) -> ServiceConfig {
let (keep_alive, ka_enabled) = match keep_alive {
KeepAlive::Timeout(val) => (val as u64, true),
KeepAlive::Os => (0, true),
KeepAlive::Disabled => (0, false),
};
let keep_alive = if ka_enabled && keep_alive > 0 {
Some(Duration::from_secs(keep_alive))
} else {
None
};
ServiceConfig(Rc::new(Inner {
keep_alive,
ka_enabled,
client_timeout,
client_disconnect,
keep_alive: keep_alive.normalize(),
client_request_timeout,
client_disconnect_timeout,
secure,
local_addr,
timer: DateService::new(),
date_service: DateService::new(),
}))
}
/// Returns `true` if connection is secure (i.e., using TLS / HTTPS).
#[inline]
/// Returns true if connection is secure(https)
pub fn secure(&self) -> bool {
self.0.secure
}
#[inline]
/// Returns the local address that this server is bound to.
///
/// Returns `None` for connections via UDS (Unix Domain Socket).
#[inline]
pub fn local_addr(&self) -> Option<net::SocketAddr> {
self.0.local_addr
}
/// Connection keep-alive setting.
#[inline]
/// Keep alive duration if configured.
pub fn keep_alive(&self) -> Option<Duration> {
pub fn keep_alive(&self) -> KeepAlive {
self.0.keep_alive
}
#[inline]
/// Return state of connection keep-alive functionality
pub fn keep_alive_enabled(&self) -> bool {
self.0.ka_enabled
}
#[inline]
/// Client timeout for first request.
pub fn client_timer(&self) -> Option<Delay> {
let delay_time = self.0.client_timeout;
if delay_time != 0 {
Some(delay_until(
self.0.timer.now() + Duration::from_millis(delay_time),
))
} else {
None
/// Creates a time object representing the deadline for this connection's keep-alive period, if
/// enabled.
///
/// When [`KeepAlive::Os`] or [`KeepAlive::Disabled`] is set, this will return `None`.
pub fn keep_alive_deadline(&self) -> Option<Instant> {
match self.keep_alive() {
KeepAlive::Timeout(dur) => Some(self.now() + dur),
KeepAlive::Os => None,
KeepAlive::Disabled => None,
}
}
/// Client timeout for first request.
pub fn client_timer_expire(&self) -> Option<Instant> {
let delay = self.0.client_timeout;
if delay != 0 {
Some(self.0.timer.now() + Duration::from_millis(delay))
} else {
None
}
/// Creates a time object representing the deadline for the client to finish sending the head of
/// its first request.
///
/// Returns `None` if this `ServiceConfig` was constructed with `client_request_timeout: 0`.
pub fn client_request_deadline(&self) -> Option<Instant> {
let timeout = self.0.client_request_timeout;
(timeout != Duration::ZERO).then(|| self.now() + timeout)
}
/// Client disconnect timer
pub fn client_disconnect_timer(&self) -> Option<Instant> {
let delay = self.0.client_disconnect;
if delay != 0 {
Some(self.0.timer.now() + Duration::from_millis(delay))
} else {
None
}
/// Creates a time object representing the deadline for the client to disconnect.
pub fn client_disconnect_deadline(&self) -> Option<Instant> {
let timeout = self.0.client_disconnect_timeout;
(timeout != Duration::ZERO).then(|| self.now() + timeout)
}
#[inline]
/// Return keep-alive timer delay is configured.
pub fn keep_alive_timer(&self) -> Option<Delay> {
if let Some(ka) = self.0.keep_alive {
Some(delay_until(self.0.timer.now() + ka))
} else {
None
}
}
/// Keep-alive expire time
pub fn keep_alive_expire(&self) -> Option<Instant> {
if let Some(ka) = self.0.keep_alive {
Some(self.0.timer.now() + ka)
} else {
None
}
}
#[inline]
pub(crate) fn now(&self) -> Instant {
self.0.timer.now()
self.0.date_service.now()
}
/// Writes date header to `dst` buffer.
///
/// Low-level method that utilizes the built-in efficient date service, requiring fewer syscalls
/// than normal. Note that a CRLF (`\r\n`) is included in what is written.
#[doc(hidden)]
pub fn set_date(&self, dst: &mut BytesMut) {
let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
pub fn write_date_header(&self, dst: &mut BytesMut, camel_case: bool) {
let mut buf: [u8; 37] = [0; 37];
buf[..6].copy_from_slice(if camel_case { b"Date: " } else { b"date: " });
self.0
.timer
.set_date(|date| buf[6..35].copy_from_slice(&date.bytes));
buf[35..].copy_from_slice(b"\r\n\r\n");
.date_service
.with_date(|date| buf[6..35].copy_from_slice(&date.bytes));
buf[35..].copy_from_slice(b"\r\n");
dst.extend_from_slice(&buf);
}
pub(crate) fn set_date_header(&self, dst: &mut BytesMut) {
#[allow(unused)] // used with `http2` feature flag
pub(crate) fn write_date_header_value(&self, dst: &mut BytesMut) {
self.0
.timer
.set_date(|date| dst.extend_from_slice(&date.bytes));
}
}
#[derive(Copy, Clone)]
struct Date {
bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
impl Date {
fn new() -> Date {
let mut date = Date {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
date.update();
date
}
fn update(&mut self) {
self.pos = 0;
write!(
self,
"{}",
OffsetDateTime::now_utc().format("%a, %d %b %Y %H:%M:%S GMT")
)
.unwrap();
}
}
impl fmt::Write for Date {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
}
}
#[derive(Clone)]
struct DateService(Rc<DateServiceInner>);
struct DateServiceInner {
current: Cell<Option<(Date, Instant)>>,
}
impl DateServiceInner {
fn new() -> Self {
DateServiceInner {
current: Cell::new(None),
}
}
fn reset(&self) {
self.current.take();
}
fn update(&self) {
let now = Instant::now();
let date = Date::new();
self.current.set(Some((date, now)));
}
}
impl DateService {
fn new() -> Self {
DateService(Rc::new(DateServiceInner::new()))
}
fn check_date(&self) {
if self.0.current.get().is_none() {
self.0.update();
// periodic date update
let s = self.clone();
actix_rt::spawn(delay_for(Duration::from_millis(500)).then(move |_| {
s.0.reset();
future::ready(())
}));
}
}
fn now(&self) -> Instant {
self.check_date();
self.0.current.get().unwrap().1
}
fn set_date<F: FnMut(&Date)>(&self, mut f: F) {
self.check_date();
f(&self.0.current.get().unwrap().0);
.date_service
.with_date(|date| dst.extend_from_slice(&date.bytes));
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::{date::DATE_VALUE_LENGTH, notify_on_drop};
// Test modifying the date from within the closure
// passed to `set_date`
#[test]
fn test_evil_date() {
let service = DateService::new();
// Make sure that `check_date` doesn't try to spawn a task
service.0.update();
service.set_date(|_| service.0.reset());
use actix_rt::{
task::yield_now,
time::{sleep, sleep_until},
};
use memchr::memmem;
#[actix_rt::test]
async fn test_date_service_update() {
let settings =
ServiceConfig::new(KeepAlive::Os, Duration::ZERO, Duration::ZERO, false, None);
yield_now().await;
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf1, false);
let now1 = settings.now();
sleep_until((Instant::now() + Duration::from_secs(2)).into()).await;
yield_now().await;
let now2 = settings.now();
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf2, false);
assert_ne!(now1, now2);
assert_ne!(buf1, buf2);
drop(settings);
// Ensure the task will drop eventually
let mut times = 0;
while !notify_on_drop::is_dropped() {
sleep(Duration::from_millis(100)).await;
times += 1;
assert!(times < 10, "Timeout waiting for task drop");
}
}
#[actix_rt::test]
async fn test_date_service_drop() {
let service = Rc::new(DateService::new());
// yield so the date service has a chance to register the spawned timer update task.
yield_now().await;
let clone1 = service.clone();
let clone2 = service.clone();
let clone3 = service.clone();
drop(clone1);
assert!(!notify_on_drop::is_dropped());
drop(clone2);
assert!(!notify_on_drop::is_dropped());
drop(clone3);
assert!(!notify_on_drop::is_dropped());
drop(service);
// Ensure the task will drop eventually
let mut times = 0;
while !notify_on_drop::is_dropped() {
sleep(Duration::from_millis(100)).await;
times += 1;
assert!(times < 10, "Timeout waiting for task drop");
}
}
#[test]
@ -305,11 +210,27 @@ mod tests {
#[actix_rt::test]
async fn test_date() {
let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None);
let settings = ServiceConfig::default();
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf1);
settings.write_date_header(&mut buf1, false);
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf2);
settings.write_date_header(&mut buf2, false);
assert_eq!(buf1, buf2);
}
#[actix_rt::test]
async fn test_date_camel_case() {
let settings = ServiceConfig::default();
let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf, false);
assert!(memmem::find(&buf, b"date:").is_some());
let mut buf = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.write_date_header(&mut buf, true);
assert!(memmem::find(&buf, b"Date:").is_some());
}
}
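
A hedged sketch of the reworked constructor: timeouts are now `Duration`s instead of bare integers, keep-alive uses the crate-level `KeepAlive` enum, and `set_date` has become `write_date_header` (exact re-export paths should be checked against the released crate):

```rust
use std::time::Duration;

use actix_http::{KeepAlive, ServiceConfig};
use bytes::BytesMut;

#[actix_rt::main]
async fn main() {
    // constructing the config starts the shared date-update task, so this must
    // run inside an Actix runtime
    let cfg = ServiceConfig::new(
        KeepAlive::Timeout(Duration::from_secs(5)), // connection keep-alive
        Duration::from_secs(5),                     // client_request_timeout
        Duration::ZERO,                             // client_disconnect_timeout (disabled)
        false,                                      // secure
        None,                                       // local_addr
    );

    let mut buf = BytesMut::new();
    cfg.write_date_header(&mut buf, false);
    // buf now holds e.g. `date: Thu, 01 Jan 1970 00:00:00 GMT\r\n`
}
```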

actix-http/src/date.rs (new file, 92 lines)

@ -0,0 +1,92 @@
use std::{
cell::Cell,
fmt::{self, Write},
rc::Rc,
time::{Duration, Instant, SystemTime},
};
use actix_rt::{task::JoinHandle, time::interval};
/// "Thu, 01 Jan 1970 00:00:00 GMT".len()
pub(crate) const DATE_VALUE_LENGTH: usize = 29;
#[derive(Clone, Copy)]
pub(crate) struct Date {
pub(crate) bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
}
impl Date {
fn new() -> Date {
let mut date = Date {
bytes: [0; DATE_VALUE_LENGTH],
pos: 0,
};
date.update();
date
}
fn update(&mut self) {
self.pos = 0;
write!(self, "{}", httpdate::fmt_http_date(SystemTime::now())).unwrap();
}
}
impl fmt::Write for Date {
fn write_str(&mut self, s: &str) -> fmt::Result {
let len = s.len();
self.bytes[self.pos..self.pos + len].copy_from_slice(s.as_bytes());
self.pos += len;
Ok(())
}
}
/// Service that updates the `Date` and `Instant` periodically, at a 500 millisecond interval.
pub(crate) struct DateService {
current: Rc<Cell<(Date, Instant)>>,
handle: JoinHandle<()>,
}
impl DateService {
pub(crate) fn new() -> Self {
// shared date and timer for DateService and update async task.
let current = Rc::new(Cell::new((Date::new(), Instant::now())));
let current_clone = Rc::clone(&current);
// spawn an async task that sleeps for 500 millis and updates the current date/instant in a loop.
// handle is used to stop the task on DateService drop.
let handle = actix_rt::spawn(async move {
#[cfg(test)]
let _notify = crate::notify_on_drop::NotifyOnDrop::new();
let mut interval = interval(Duration::from_millis(500));
loop {
let now = interval.tick().await;
let date = Date::new();
current_clone.set((date, now.into_std()));
}
});
DateService { current, handle }
}
pub(crate) fn now(&self) -> Instant {
self.current.get().1
}
pub(crate) fn with_date<F: FnMut(&Date)>(&self, mut f: F) {
f(&self.current.get().0);
}
}
impl fmt::Debug for DateService {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("DateService").finish_non_exhaustive()
}
}
impl Drop for DateService {
fn drop(&mut self) {
// stop the timer update async task on drop.
self.handle.abort();
}
}
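A minimal usage sketch (not part of the diff above; the helper name and buffer handling are illustrative only) of how the cached value held by `DateService` can be copied into a response buffer without re-formatting the system time on every write:
use bytes::BytesMut;
// Sketch: copy the pre-formatted date bytes into an output buffer.
// `with_date` and `Date::bytes` are defined above; everything else is hypothetical.
fn write_cached_date(date_service: &DateService, dst: &mut BytesMut) {
    date_service.with_date(|date| {
        dst.extend_from_slice(b"date: ");
        dst.extend_from_slice(&date.bytes);
        dst.extend_from_slice(b"\r\n");
    });
}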

View File

@ -1,25 +1,38 @@
use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::task::{Context, Poll};
//! Stream decoders.
use actix_threadpool::{run, CpuFuture};
use brotli2::write::BrotliDecoder;
use std::{
future::Future,
io::{self, Write as _},
pin::Pin,
task::{Context, Poll},
};
use actix_rt::task::{spawn_blocking, JoinHandle};
use bytes::Bytes;
use flate2::write::{GzDecoder, ZlibDecoder};
use futures_core::{ready, Stream};
use super::Writer;
use crate::error::PayloadError;
use crate::http::header::{ContentEncoding, HeaderMap, CONTENT_ENCODING};
#[cfg(feature = "compress-gzip")]
use flate2::write::{GzDecoder, ZlibDecoder};
const INPLACE: usize = 2049;
#[cfg(feature = "compress-zstd")]
use zstd::stream::write::Decoder as ZstdDecoder;
pub struct Decoder<S> {
decoder: Option<ContentDecoder>,
stream: S,
eof: bool,
fut: Option<CpuFuture<(Option<Bytes>, ContentDecoder), io::Error>>,
use crate::{
encoding::Writer,
error::PayloadError,
header::{ContentEncoding, HeaderMap, CONTENT_ENCODING},
};
const MAX_CHUNK_SIZE_DECODE_IN_PLACE: usize = 2049;
pin_project_lite::pin_project! {
pub struct Decoder<S> {
decoder: Option<ContentDecoder>,
#[pin]
stream: S,
eof: bool,
fut: Option<JoinHandle<Result<(Option<Bytes>, ContentDecoder), io::Error>>>,
}
}
impl<S> Decoder<S>
@ -30,17 +43,31 @@ where
#[inline]
pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> {
let decoder = match encoding {
ContentEncoding::Br => Some(ContentDecoder::Br(Box::new(
BrotliDecoder::new(Writer::new()),
#[cfg(feature = "compress-brotli")]
ContentEncoding::Brotli => Some(ContentDecoder::Brotli(Box::new(
brotli::DecompressorWriter::new(Writer::new(), 8_096),
))),
#[cfg(feature = "compress-gzip")]
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
ZlibDecoder::new(Writer::new()),
))),
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(
GzDecoder::new(Writer::new()),
#[cfg(feature = "compress-gzip")]
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(GzDecoder::new(
Writer::new(),
)))),
#[cfg(feature = "compress-zstd")]
ContentEncoding::Zstd => Some(ContentDecoder::Zstd(Box::new(
ZstdDecoder::new(Writer::new()).expect(
"Failed to create zstd decoder. This is a bug. \
Please report it to the actix-web repository.",
),
))),
_ => None,
};
Decoder {
decoder,
stream,
@ -53,15 +80,11 @@ where
#[inline]
pub fn from_headers(stream: S, headers: &HeaderMap) -> Decoder<S> {
// check content-encoding
let encoding = if let Some(enc) = headers.get(&CONTENT_ENCODING) {
if let Ok(enc) = enc.to_str() {
ContentEncoding::from(enc)
} else {
ContentEncoding::Identity
}
} else {
ContentEncoding::Identity
};
let encoding = headers
.get(&CONTENT_ENCODING)
.and_then(|val| val.to_str().ok())
.and_then(|x| x.parse().ok())
.unwrap_or(ContentEncoding::Identity);
Self::new(stream, encoding)
}
@ -69,55 +92,63 @@ where
impl<S> Stream for Decoder<S>
where
S: Stream<Item = Result<Bytes, PayloadError>> + Unpin,
S: Stream<Item = Result<Bytes, PayloadError>>,
{
type Item = Result<Bytes, PayloadError>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
let mut this = self.project();
loop {
if let Some(ref mut fut) = self.fut {
let (chunk, decoder) = match ready!(Pin::new(fut).poll(cx)) {
Ok(item) => item,
Err(e) => return Poll::Ready(Some(Err(e.into()))),
};
self.decoder = Some(decoder);
self.fut.take();
if let Some(ref mut fut) = this.fut {
let (chunk, decoder) = ready!(Pin::new(fut).poll(cx)).map_err(|_| {
PayloadError::Io(io::Error::new(
io::ErrorKind::Other,
"Blocking task was cancelled unexpectedly",
))
})??;
*this.decoder = Some(decoder);
this.fut.take();
if let Some(chunk) = chunk {
return Poll::Ready(Some(Ok(chunk)));
}
}
if self.eof {
if *this.eof {
return Poll::Ready(None);
}
match Pin::new(&mut self.stream).poll_next(cx) {
Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err))),
Poll::Ready(Some(Ok(chunk))) => {
if let Some(mut decoder) = self.decoder.take() {
if chunk.len() < INPLACE {
match ready!(this.stream.as_mut().poll_next(cx)) {
Some(Err(err)) => return Poll::Ready(Some(Err(err))),
Some(Ok(chunk)) => {
if let Some(mut decoder) = this.decoder.take() {
if chunk.len() < MAX_CHUNK_SIZE_DECODE_IN_PLACE {
let chunk = decoder.feed_data(chunk)?;
self.decoder = Some(decoder);
*this.decoder = Some(decoder);
if let Some(chunk) = chunk {
return Poll::Ready(Some(Ok(chunk)));
}
} else {
self.fut = Some(run(move || {
*this.fut = Some(spawn_blocking(move || {
let chunk = decoder.feed_data(chunk)?;
Ok((chunk, decoder))
}));
}
continue;
} else {
return Poll::Ready(Some(Ok(chunk)));
}
}
Poll::Ready(None) => {
self.eof = true;
return if let Some(mut decoder) = self.decoder.take() {
None => {
*this.eof = true;
return if let Some(mut decoder) = this.decoder.take() {
match decoder.feed_eof() {
Ok(Some(res)) => Poll::Ready(Some(Ok(res))),
Ok(None) => Poll::Ready(None),
@ -127,25 +158,35 @@ where
Poll::Ready(None)
};
}
Poll::Pending => break,
}
}
Poll::Pending
}
}
enum ContentDecoder {
#[cfg(feature = "compress-gzip")]
Deflate(Box<ZlibDecoder<Writer>>),
#[cfg(feature = "compress-gzip")]
Gzip(Box<GzDecoder<Writer>>),
Br(Box<BrotliDecoder<Writer>>),
#[cfg(feature = "compress-brotli")]
Brotli(Box<brotli::DecompressorWriter<Writer>>),
// We need an explicit 'static lifetime here because ZstdDecoder needs a lifetime
// argument, and we use `spawn_blocking` in `Decoder::poll_next`, which requires `FnOnce() -> R + Send + 'static`.
#[cfg(feature = "compress-zstd")]
Zstd(Box<ZstdDecoder<'static, Writer>>),
}
impl ContentDecoder {
fn feed_eof(&mut self) -> io::Result<Option<Bytes>> {
match self {
ContentDecoder::Br(ref mut decoder) => match decoder.flush() {
#[cfg(feature = "compress-brotli")]
ContentDecoder::Brotli(ref mut decoder) => match decoder.flush() {
Ok(()) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
@ -154,7 +195,23 @@ impl ContentDecoder {
}
Err(e) => Err(e),
},
#[cfg(feature = "compress-gzip")]
ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() {
Ok(_) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
#[cfg(feature = "compress-gzip")]
ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() {
Ok(_) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
@ -165,7 +222,9 @@ impl ContentDecoder {
}
Err(e) => Err(e),
},
ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() {
#[cfg(feature = "compress-zstd")]
ContentDecoder::Zstd(ref mut decoder) => match decoder.flush() {
Ok(_) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
@ -181,10 +240,12 @@ impl ContentDecoder {
fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> {
match self {
ContentDecoder::Br(ref mut decoder) => match decoder.write_all(&data) {
#[cfg(feature = "compress-brotli")]
ContentDecoder::Brotli(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
@ -193,10 +254,13 @@ impl ContentDecoder {
}
Err(e) => Err(e),
},
#[cfg(feature = "compress-gzip")]
ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
@ -205,9 +269,27 @@ impl ContentDecoder {
}
Err(e) => Err(e),
},
#[cfg(feature = "compress-gzip")]
ContentDecoder::Deflate(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
Ok(None)
}
}
Err(e) => Err(e),
},
#[cfg(feature = "compress-zstd")]
ContentDecoder::Zstd(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))

View File

@ -1,166 +1,211 @@
//! Stream encoder
use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::task::{Context, Poll};
//! Stream encoders.
use actix_threadpool::{run, CpuFuture};
use brotli2::write::BrotliEncoder;
use std::{
error::Error as StdError,
future::Future,
io::{self, Write as _},
pin::Pin,
task::{Context, Poll},
};
use actix_rt::task::{spawn_blocking, JoinHandle};
use bytes::Bytes;
use flate2::write::{GzEncoder, ZlibEncoder};
use derive_more::Display;
use futures_core::ready;
use pin_project::pin_project;
use pin_project_lite::pin_project;
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use crate::http::header::{ContentEncoding, CONTENT_ENCODING};
use crate::http::{HeaderValue, StatusCode};
use crate::{Error, ResponseHead};
#[cfg(feature = "compress-gzip")]
use flate2::write::{GzEncoder, ZlibEncoder};
#[cfg(feature = "compress-zstd")]
use zstd::stream::write::Encoder as ZstdEncoder;
use super::Writer;
use crate::{
body::{self, BodySize, MessageBody},
header::{self, ContentEncoding, HeaderValue, CONTENT_ENCODING},
ResponseHead, StatusCode,
};
const INPLACE: usize = 1024;
const MAX_CHUNK_SIZE_ENCODE_IN_PLACE: usize = 1024;
#[pin_project]
pub struct Encoder<B> {
eof: bool,
#[pin]
body: EncoderBody<B>,
encoder: Option<ContentEncoder>,
fut: Option<CpuFuture<ContentEncoder, io::Error>>,
pin_project! {
pub struct Encoder<B> {
#[pin]
body: EncoderBody<B>,
encoder: Option<ContentEncoder>,
fut: Option<JoinHandle<Result<ContentEncoder, io::Error>>>,
eof: bool,
}
}
impl<B: MessageBody> Encoder<B> {
pub fn response(
encoding: ContentEncoding,
head: &mut ResponseHead,
body: ResponseBody<B>,
) -> ResponseBody<Encoder<B>> {
let can_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
fn none() -> Self {
Encoder {
body: EncoderBody::None {
body: body::None::new(),
},
encoder: None,
fut: None,
eof: true,
}
}
pub fn response(encoding: ContentEncoding, head: &mut ResponseHead, body: B) -> Self {
// no need to compress an empty body
if matches!(body.size(), BodySize::None) {
return Self::none();
}
let should_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
|| head.status == StatusCode::SWITCHING_PROTOCOLS
|| head.status == StatusCode::NO_CONTENT
|| encoding == ContentEncoding::Identity
|| encoding == ContentEncoding::Auto);
|| encoding == ContentEncoding::Identity);
let body = match body {
ResponseBody::Other(b) => match b {
Body::None => return ResponseBody::Other(Body::None),
Body::Empty => return ResponseBody::Other(Body::Empty),
Body::Bytes(buf) => {
if can_encode {
EncoderBody::Bytes(buf)
} else {
return ResponseBody::Other(Body::Bytes(buf));
}
}
Body::Message(stream) => EncoderBody::BoxedStream(stream),
},
ResponseBody::Body(stream) => EncoderBody::Stream(stream),
let body = match body.try_into_bytes() {
Ok(body) => EncoderBody::Full { body },
Err(body) => EncoderBody::Stream { body },
};
if can_encode {
// Modify response body only if encoder is not None
if let Some(enc) = ContentEncoder::encoder(encoding) {
if should_encode {
// wrap body only if encoder is feature-enabled
if let Some(enc) = ContentEncoder::select(encoding) {
update_head(encoding, head);
head.no_chunking(false);
return ResponseBody::Body(Encoder {
return Encoder {
body,
eof: false,
fut: None,
encoder: Some(enc),
});
fut: None,
eof: false,
};
}
}
ResponseBody::Body(Encoder {
Encoder {
body,
eof: false,
fut: None,
encoder: None,
})
fut: None,
eof: false,
}
}
}
#[pin_project(project = EncoderBodyProj)]
enum EncoderBody<B> {
Bytes(Bytes),
Stream(#[pin] B),
BoxedStream(Box<dyn MessageBody + Unpin>),
pin_project! {
#[project = EncoderBodyProj]
enum EncoderBody<B> {
None { body: body::None },
Full { body: Bytes },
Stream { #[pin] body: B },
}
}
impl<B: MessageBody> MessageBody for EncoderBody<B> {
impl<B> MessageBody for EncoderBody<B>
where
B: MessageBody,
{
type Error = EncoderError;
#[inline]
fn size(&self) -> BodySize {
match self {
EncoderBody::Bytes(ref b) => b.size(),
EncoderBody::Stream(ref b) => b.size(),
EncoderBody::BoxedStream(ref b) => b.size(),
EncoderBody::None { body } => body.size(),
EncoderBody::Full { body } => body.size(),
EncoderBody::Stream { body } => body.size(),
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
) -> Poll<Option<Result<Bytes, Self::Error>>> {
match self.project() {
EncoderBodyProj::Bytes(b) => {
if b.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(std::mem::take(b))))
}
EncoderBodyProj::None { body } => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
EncoderBodyProj::Stream(b) => b.poll_next(cx),
EncoderBodyProj::BoxedStream(ref mut b) => {
Pin::new(b.as_mut()).poll_next(cx)
EncoderBodyProj::Full { body } => {
Pin::new(body).poll_next(cx).map_err(|err| match err {})
}
EncoderBodyProj::Stream { body } => body
.poll_next(cx)
.map_err(|err| EncoderError::Body(err.into())),
}
}
#[inline]
fn try_into_bytes(self) -> Result<Bytes, Self>
where
Self: Sized,
{
match self {
EncoderBody::None { body } => Ok(body.try_into_bytes().unwrap()),
EncoderBody::Full { body } => Ok(body.try_into_bytes().unwrap()),
_ => Err(self),
}
}
}
impl<B: MessageBody> MessageBody for Encoder<B> {
impl<B> MessageBody for Encoder<B>
where
B: MessageBody,
{
type Error = EncoderError;
#[inline]
fn size(&self) -> BodySize {
if self.encoder.is_none() {
self.body.size()
} else {
if self.encoder.is_some() {
BodySize::Stream
} else {
self.body.size()
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
) -> Poll<Option<Result<Bytes, Self::Error>>> {
let mut this = self.project();
loop {
if *this.eof {
return Poll::Ready(None);
}
if let Some(ref mut fut) = this.fut {
let mut encoder = match ready!(Pin::new(fut).poll(cx)) {
Ok(item) => item,
Err(e) => return Poll::Ready(Some(Err(e.into()))),
};
let mut encoder = ready!(Pin::new(fut).poll(cx))
.map_err(|_| {
EncoderError::Io(io::Error::new(
io::ErrorKind::Other,
"Blocking task was cancelled unexpectedly",
))
})?
.map_err(EncoderError::Io)?;
let chunk = encoder.take();
*this.encoder = Some(encoder);
this.fut.take();
if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk)));
}
}
let result = this.body.as_mut().poll_next(cx);
let result = ready!(this.body.as_mut().poll_next(cx));
match result {
Poll::Ready(Some(Ok(chunk))) => {
Some(Err(err)) => return Poll::Ready(Some(Err(err))),
Some(Ok(chunk)) => {
if let Some(mut encoder) = this.encoder.take() {
if chunk.len() < INPLACE {
encoder.write(&chunk)?;
if chunk.len() < MAX_CHUNK_SIZE_ENCODE_IN_PLACE {
encoder.write(&chunk).map_err(EncoderError::Io)?;
let chunk = encoder.take();
*this.encoder = Some(encoder);
if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk)));
}
} else {
*this.fut = Some(run(move || {
*this.fut = Some(spawn_blocking(move || {
encoder.write(&chunk)?;
Ok(encoder)
}));
@ -169,9 +214,11 @@ impl<B: MessageBody> MessageBody for Encoder<B> {
return Poll::Ready(Some(Ok(chunk)));
}
}
Poll::Ready(None) => {
None => {
if let Some(encoder) = this.encoder.take() {
let chunk = encoder.finish()?;
let chunk = encoder.finish().map_err(EncoderError::Io)?;
if chunk.is_empty() {
return Poll::Ready(None);
} else {
@ -182,39 +229,78 @@ impl<B: MessageBody> MessageBody for Encoder<B> {
return Poll::Ready(None);
}
}
val => return val,
}
}
}
#[inline]
fn try_into_bytes(mut self) -> Result<Bytes, Self>
where
Self: Sized,
{
if self.encoder.is_some() {
Err(self)
} else {
match self.body.try_into_bytes() {
Ok(body) => Ok(body),
Err(body) => {
self.body = body;
Err(self)
}
}
}
}
}
fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
head.headers_mut().insert(
CONTENT_ENCODING,
HeaderValue::from_static(encoding.as_str()),
);
head.headers_mut()
.insert(header::CONTENT_ENCODING, encoding.to_header_value());
head.headers_mut()
.insert(header::VARY, HeaderValue::from_static("accept-encoding"));
head.no_chunking(false);
}
enum ContentEncoder {
#[cfg(feature = "compress-gzip")]
Deflate(ZlibEncoder<Writer>),
#[cfg(feature = "compress-gzip")]
Gzip(GzEncoder<Writer>),
Br(BrotliEncoder<Writer>),
#[cfg(feature = "compress-brotli")]
Brotli(Box<brotli::CompressorWriter<Writer>>),
// We need an explicit 'static lifetime here because ZstdEncoder needs a lifetime argument and we
// use `spawn_blocking` in `Encoder::poll_next`, which requires `FnOnce() -> R + Send + 'static`.
#[cfg(feature = "compress-zstd")]
Zstd(ZstdEncoder<'static, Writer>),
}
impl ContentEncoder {
fn encoder(encoding: ContentEncoding) -> Option<Self> {
fn select(encoding: ContentEncoding) -> Option<Self> {
match encoding {
#[cfg(feature = "compress-gzip")]
ContentEncoding::Deflate => Some(ContentEncoder::Deflate(ZlibEncoder::new(
Writer::new(),
flate2::Compression::fast(),
))),
#[cfg(feature = "compress-gzip")]
ContentEncoding::Gzip => Some(ContentEncoder::Gzip(GzEncoder::new(
Writer::new(),
flate2::Compression::fast(),
))),
ContentEncoding::Br => {
Some(ContentEncoder::Br(BrotliEncoder::new(Writer::new(), 3)))
#[cfg(feature = "compress-brotli")]
ContentEncoding::Brotli => Some(ContentEncoder::Brotli(new_brotli_compressor())),
#[cfg(feature = "compress-zstd")]
ContentEncoding::Zstd => {
let encoder = ZstdEncoder::new(Writer::new(), 3).ok()?;
Some(ContentEncoder::Zstd(encoder))
}
_ => None,
}
}
@ -222,52 +308,122 @@ impl ContentEncoder {
#[inline]
pub(crate) fn take(&mut self) -> Bytes {
match *self {
ContentEncoder::Br(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-brotli")]
ContentEncoder::Brotli(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-gzip")]
ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(),
#[cfg(feature = "compress-zstd")]
ContentEncoder::Zstd(ref mut encoder) => encoder.get_mut().take(),
}
}
fn finish(self) -> Result<Bytes, io::Error> {
match self {
ContentEncoder::Br(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
#[cfg(feature = "compress-brotli")]
ContentEncoder::Brotli(mut encoder) => match encoder.flush() {
Ok(()) => Ok(encoder.into_inner().buf.freeze()),
Err(err) => Err(err),
},
#[cfg(feature = "compress-gzip")]
ContentEncoder::Gzip(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
#[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
#[cfg(feature = "compress-zstd")]
ContentEncoder::Zstd(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
}
}
fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
match *self {
ContentEncoder::Br(ref mut encoder) => match encoder.write_all(data) {
#[cfg(feature = "compress-brotli")]
ContentEncoder::Brotli(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding br encoding: {}", err);
log::trace!("Error decoding br encoding: {}", err);
Err(err)
}
},
#[cfg(feature = "compress-gzip")]
ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding gzip encoding: {}", err);
log::trace!("Error decoding gzip encoding: {}", err);
Err(err)
}
},
#[cfg(feature = "compress-gzip")]
ContentEncoder::Deflate(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding deflate encoding: {}", err);
log::trace!("Error decoding deflate encoding: {}", err);
Err(err)
}
},
#[cfg(feature = "compress-zstd")]
ContentEncoder::Zstd(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(()),
Err(err) => {
log::trace!("Error decoding ztsd encoding: {}", err);
Err(err)
}
},
}
}
}
#[cfg(feature = "compress-brotli")]
fn new_brotli_compressor() -> Box<brotli::CompressorWriter<Writer>> {
Box::new(brotli::CompressorWriter::new(
Writer::new(),
32 * 1024, // 32 KiB buffer
3, // BROTLI_PARAM_QUALITY
22, // BROTLI_PARAM_LGWIN
))
}
#[derive(Debug, Display)]
#[non_exhaustive]
pub enum EncoderError {
/// Wrapped body stream error.
#[display(fmt = "body")]
Body(Box<dyn StdError>),
/// Generic I/O error.
#[display(fmt = "io")]
Io(io::Error),
}
impl StdError for EncoderError {
fn source(&self) -> Option<&(dyn StdError + 'static)> {
match self {
EncoderError::Body(err) => Some(&**err),
EncoderError::Io(err) => Some(err),
}
}
}
impl From<EncoderError> for crate::Error {
fn from(err: EncoderError) -> Self {
crate::Error::new_encoder().with_cause(err)
}
}

View File

@ -1,4 +1,5 @@
//! Content-Encoding support
//! Content-Encoding support.
use std::io;
use bytes::{Bytes, BytesMut};
@ -9,6 +10,9 @@ mod encoder;
pub use self::decoder::Decoder;
pub use self::encoder::Encoder;
/// Special-purpose writer for streaming (de-)compression.
///
/// Pre-allocates 8KiB of capacity.
pub(self) struct Writer {
buf: BytesMut,
}
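The remainder of this file is not shown; as a rough sketch (an assumption inferred from how `Writer::new`, `take`, and the `io::Write` bound are used by the encoder and decoder above, not the actual implementation), the writer simply appends into its `BytesMut` and hands the accumulated bytes back out:
impl Writer {
    fn new() -> Writer {
        Writer {
            // assumption: matches the "pre-allocates 8KiB" note above
            buf: BytesMut::with_capacity(8192),
        }
    }
    /// Split off everything written so far, leaving the internal buffer empty.
    fn take(&mut self) -> Bytes {
        self.buf.split().freeze()
    }
}
impl io::Write for Writer {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.buf.extend_from_slice(buf);
        Ok(buf.len())
    }
    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}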

File diff suppressed because it is too large

View File

@ -1,62 +1,118 @@
use std::any::{Any, TypeId};
use std::{fmt, mem};
use std::{
any::{Any, TypeId},
fmt,
};
use fxhash::FxHashMap;
use ahash::AHashMap;
/// A type map of request extensions.
/// A type map for request extensions.
///
/// All entries into this map must be owned types (or static references).
#[derive(Default)]
pub struct Extensions {
/// Use FxHasher with a std HashMap for faster
/// lookups on the small `TypeId` (u64 equivalent) keys.
map: FxHashMap<TypeId, Box<dyn Any>>,
/// Use AHasher with a std HashMap for faster lookups on the small `TypeId` keys.
map: AHashMap<TypeId, Box<dyn Any>>,
}
impl Extensions {
/// Create an empty `Extensions`.
/// Creates an empty `Extensions`.
#[inline]
pub fn new() -> Extensions {
Extensions {
map: FxHashMap::default(),
map: AHashMap::new(),
}
}
/// Insert a type into this `Extensions`.
/// Insert an item into the map.
///
/// If an extension of this type already existed, it will
/// be returned.
pub fn insert<T: 'static>(&mut self, val: T) {
self.map.insert(TypeId::of::<T>(), Box::new(val));
/// If an item of this type was already stored, it will be replaced and returned.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// assert_eq!(map.insert(""), None);
/// assert_eq!(map.insert(1u32), None);
/// assert_eq!(map.insert(2u32), Some(1u32));
/// assert_eq!(*map.get::<u32>().unwrap(), 2u32);
/// ```
pub fn insert<T: 'static>(&mut self, val: T) -> Option<T> {
self.map
.insert(TypeId::of::<T>(), Box::new(val))
.and_then(downcast_owned)
}
/// Check if container contains entry
/// Check if map contains an item of a given type.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// assert!(!map.contains::<u32>());
///
/// assert_eq!(map.insert(1u32), None);
/// assert!(map.contains::<u32>());
/// ```
pub fn contains<T: 'static>(&self) -> bool {
self.map.contains_key(&TypeId::of::<T>())
}
/// Get a reference to a type previously inserted on this `Extensions`.
/// Get a reference to an item of a given type.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// map.insert(1u32);
/// assert_eq!(map.get::<u32>(), Some(&1u32));
/// ```
pub fn get<T: 'static>(&self) -> Option<&T> {
self.map
.get(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast_ref())
}
/// Get a mutable reference to a type previously inserted on this `Extensions`.
/// Get a mutable reference to an item of a given type.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// map.insert(1u32);
/// assert_eq!(map.get_mut::<u32>(), Some(&mut 1u32));
/// ```
pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
self.map
.get_mut(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast_mut())
}
/// Remove a type from this `Extensions`.
/// Remove an item from the map of a given type.
///
/// If an extension of this type existed, it will be returned.
/// If an item of this type was already stored, it will be returned.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
///
/// map.insert(1u32);
/// assert_eq!(map.get::<u32>(), Some(&1u32));
///
/// assert_eq!(map.remove::<u32>(), Some(1u32));
/// assert!(!map.contains::<u32>());
/// ```
pub fn remove<T: 'static>(&mut self) -> Option<T> {
self.map
.remove(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast().ok().map(|boxed| *boxed))
self.map.remove(&TypeId::of::<T>()).and_then(downcast_owned)
}
/// Clear the `Extensions` of all inserted extensions.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
///
/// map.insert(1u32);
/// assert!(map.contains::<u32>());
///
/// map.clear();
/// assert!(!map.contains::<u32>());
/// ```
#[inline]
pub fn clear(&mut self) {
self.map.clear();
@ -66,11 +122,6 @@ impl Extensions {
pub fn extend(&mut self, other: Extensions) {
self.map.extend(other.map);
}
/// Sets (or overrides) items from `other` into this map.
pub(crate) fn drain_from(&mut self, other: &mut Self) {
self.map.extend(mem::take(&mut other.map));
}
}
impl fmt::Debug for Extensions {
@ -79,6 +130,10 @@ impl fmt::Debug for Extensions {
}
}
fn downcast_owned<T: 'static>(boxed: Box<dyn Any>) -> Option<T> {
boxed.downcast().ok().map(|boxed| *boxed)
}
#[cfg(test)]
mod tests {
use super::*;
@ -118,6 +173,8 @@ mod tests {
#[test]
fn test_integers() {
static A: u32 = 8;
let mut map = Extensions::new();
map.insert::<i8>(8);
@ -130,6 +187,7 @@ mod tests {
map.insert::<u32>(32);
map.insert::<u64>(64);
map.insert::<u128>(128);
map.insert::<&'static u32>(&A);
assert!(map.get::<i8>().is_some());
assert!(map.get::<i16>().is_some());
assert!(map.get::<i32>().is_some());
@ -140,6 +198,7 @@ mod tests {
assert!(map.get::<u32>().is_some());
assert!(map.get::<u64>().is_some());
assert!(map.get::<u128>().is_some());
assert!(map.get::<&'static u32>().is_some());
}
#[test]
@ -218,27 +277,4 @@ mod tests {
assert_eq!(extensions.get(), Some(&20u8));
assert_eq!(extensions.get_mut(), Some(&mut 20u8));
}
#[test]
fn test_drain_from() {
let mut ext = Extensions::new();
ext.insert(2isize);
let mut more_ext = Extensions::new();
more_ext.insert(5isize);
more_ext.insert(5usize);
assert_eq!(ext.get::<isize>(), Some(&2isize));
assert_eq!(ext.get::<usize>(), None);
assert_eq!(more_ext.get::<isize>(), Some(&5isize));
assert_eq!(more_ext.get::<usize>(), Some(&5usize));
ext.drain_from(&mut more_ext);
assert_eq!(ext.get::<isize>(), Some(&5isize));
assert_eq!(ext.get::<usize>(), Some(&5usize));
assert_eq!(more_ext.get::<isize>(), None);
assert_eq!(more_ext.get::<usize>(), None);
}
}

View File

@ -0,0 +1,426 @@
use std::{io, task::Poll};
use bytes::{Buf as _, Bytes, BytesMut};
macro_rules! byte (
($rdr:ident) => ({
if $rdr.len() > 0 {
let b = $rdr[0];
$rdr.advance(1);
b
} else {
return Poll::Pending
}
})
);
#[derive(Debug, PartialEq, Clone)]
pub(super) enum ChunkedState {
Size,
SizeLws,
Extension,
SizeLf,
Body,
BodyCr,
BodyLf,
EndCr,
EndLf,
End,
}
impl ChunkedState {
pub(super) fn step(
&self,
body: &mut BytesMut,
size: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
use self::ChunkedState::*;
match *self {
Size => ChunkedState::read_size(body, size),
SizeLws => ChunkedState::read_size_lws(body),
Extension => ChunkedState::read_extension(body),
SizeLf => ChunkedState::read_size_lf(body, *size),
Body => ChunkedState::read_body(body, size, buf),
BodyCr => ChunkedState::read_body_cr(body),
BodyLf => ChunkedState::read_body_lf(body),
EndCr => ChunkedState::read_end_cr(body),
EndLf => ChunkedState::read_end_lf(body),
End => Poll::Ready(Ok(ChunkedState::End)),
}
}
fn read_size(rdr: &mut BytesMut, size: &mut u64) -> Poll<Result<ChunkedState, io::Error>> {
let radix = 16;
let rem = match byte!(rdr) {
b @ b'0'..=b'9' => b - b'0',
b @ b'a'..=b'f' => b + 10 - b'a',
b @ b'A'..=b'F' => b + 10 - b'A',
b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => return Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Invalid Size",
)));
}
};
match size.checked_mul(radix) {
Some(n) => {
*size = n as u64;
*size += rem as u64;
Poll::Ready(Ok(ChunkedState::Size))
}
None => {
log::debug!("chunk size would overflow u64");
Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Size is too big",
)))
}
}
}
fn read_size_lws(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
// LWS can follow the chunk size, but no more digits can come
b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size linear white space",
))),
}
}
fn read_extension(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
// strictly 0x20 (space) should be disallowed but we don't parse quoted strings here
0x00..=0x08 | 0x0a..=0x1f | 0x7f => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid character in chunk extension",
))),
_ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions
}
}
fn read_size_lf(rdr: &mut BytesMut, size: u64) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' if size > 0 => Poll::Ready(Ok(ChunkedState::Body)),
b'\n' if size == 0 => Poll::Ready(Ok(ChunkedState::EndCr)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size LF",
))),
}
}
fn read_body(
rdr: &mut BytesMut,
rem: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
log::trace!("Chunked read, remaining={:?}", rem);
let len = rdr.len() as u64;
if len == 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
let slice;
if *rem > len {
slice = rdr.split().freeze();
*rem -= len;
} else {
slice = rdr.split_to(*rem as usize).freeze();
*rem = 0;
}
*buf = Some(slice);
if *rem > 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
Poll::Ready(Ok(ChunkedState::BodyCr))
}
}
}
fn read_body_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body CR",
))),
}
}
fn read_body_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::Size)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body LF",
))),
}
}
fn read_end_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end CR",
))),
}
}
fn read_end_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::End)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end LF",
))),
}
}
}
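A short illustrative sketch (mirroring the loop that drives `step` from `PayloadDecoder::decode` later in this comparison; the function itself is hypothetical) of how the state machine is advanced until it either yields body chunks or runs out of input:
// Sketch: drain every complete chunk currently buffered in `src`.
fn drain_chunks(src: &mut BytesMut) -> io::Result<Vec<Bytes>> {
    let mut state = ChunkedState::Size;
    let mut size = 0u64;
    let mut chunks = Vec::new();
    loop {
        let mut buf = None;
        // `step` consumes bytes from `src`; it returns Pending when more
        // input is required than is currently buffered.
        state = match state.step(src, &mut size, &mut buf) {
            Poll::Ready(Ok(next)) => next,
            Poll::Ready(Err(err)) => return Err(err),
            Poll::Pending => break,
        };
        if let Some(chunk) = buf {
            chunks.push(chunk);
        }
        // stop at the end-of-stream marker or when the buffer is exhausted
        if state == ChunkedState::End || src.is_empty() {
            break;
        }
    }
    Ok(chunks)
}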
#[cfg(test)]
mod tests {
use actix_codec::Decoder as _;
use bytes::{Bytes, BytesMut};
use http::Method;
use crate::{
error::ParseError,
h1::decoder::{MessageDecoder, PayloadItem},
HttpMessage as _, Request,
};
macro_rules! parse_ready {
($e:expr) => {{
match MessageDecoder::<Request>::default().decode($e) {
Ok(Some((msg, _))) => msg,
Ok(_) => unreachable!("Eof during parsing http request"),
Err(err) => unreachable!("Error during parsing http request: {:?}", err),
}
}};
}
macro_rules! expect_parse_err {
($e:expr) => {{
match MessageDecoder::<Request>::default().decode($e) {
Err(err) => match err {
ParseError::Io(_) => unreachable!("Parse error expected"),
_ => {}
},
_ => unreachable!("Error expected"),
}
}};
}
#[test]
fn test_parse_chunked_payload_chunk_extension() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\
\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(msg.chunked().unwrap());
buf.extend(b"4;test\r\ndata\r\n4\r\nline\r\n0\r\n\r\n"); // test: test\r\n\r\n")
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"data"));
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"line"));
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
}
#[test]
fn test_request_chunked() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let req = parse_ready!(&mut buf);
if let Ok(val) = req.chunked() {
assert!(val);
} else {
unreachable!("Error");
}
// intentional typo in "chunked"
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chnked\r\n\r\n",
);
expect_parse_err!(&mut buf);
}
#[test]
fn test_http_request_chunked_payload() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n");
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"data"
);
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"line"
);
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn test_http_request_chunked_payload_and_next_message() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(
b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n\
POST /test2 HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n"
.iter(),
);
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"line");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert!(req.chunked().unwrap());
assert_eq!(*req.method(), Method::POST);
assert!(req.chunked().unwrap());
}
#[test]
fn test_http_request_chunked_payload_chunks() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\n1111\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"1111");
buf.extend(b"4\r\ndata\r");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
buf.extend(b"\n4");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\n");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"li");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"li");
//trailers
//buf.feed_data("test: test\r\n");
//not_ready!(reader.parse(&mut buf, &mut readbuf));
buf.extend(b"ne\r\n0\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"ne");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r\n");
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn chunk_extension_quoted() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
Host: localhost:8080\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
2;hello=b;one=\"1 2 3\"\r\n\
xx",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let chunk = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"xx")));
}
#[test]
fn hrs_chunk_extension_invalid() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: localhost:8080\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
2;x\nx\r\n\
4c\r\n\
0\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let err = pl.decode(&mut buf).unwrap_err();
assert!(err
.to_string()
.contains("Invalid character in chunk extension"));
}
#[test]
fn hrs_chunk_size_overflow() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
f0000000000000003\r\n\
abc\r\n\
0\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let err = pl.decode(&mut buf).unwrap_err();
assert!(err
.to_string()
.contains("Invalid chunk size line: Size is too big"));
}
}

View File

@ -1,23 +1,25 @@
use std::io;
use std::{fmt, io};
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags;
use bytes::{Bytes, BytesMut};
use http::{Method, Version};
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
use super::{decoder, encoder, reserve_readbuf};
use super::{Message, MessageType};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::error::{ParseError, PayloadError};
use crate::message::{ConnectionType, RequestHeadType, ResponseHead};
use super::{
decoder::{self, PayloadDecoder, PayloadItem, PayloadType},
encoder, reserve_readbuf, Message, MessageType,
};
use crate::{
body::BodySize,
error::{ParseError, PayloadError},
ConnectionType, RequestHeadType, ResponseHead, ServiceConfig,
};
bitflags! {
struct Flags: u8 {
const HEAD = 0b0000_0001;
const KEEPALIVE_ENABLED = 0b0000_1000;
const STREAM = 0b0001_0000;
const HEAD = 0b0000_0001;
const KEEP_ALIVE_ENABLED = 0b0000_1000;
const STREAM = 0b0001_0000;
}
}
@ -36,7 +38,7 @@ struct ClientCodecInner {
decoder: decoder::MessageDecoder<ResponseHead>,
payload: Option<PayloadDecoder>,
version: Version,
ctype: ConnectionType,
conn_type: ConnectionType,
// encoder part
flags: Flags,
@ -49,23 +51,32 @@ impl Default for ClientCodec {
}
}
impl fmt::Debug for ClientCodec {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("h1::ClientCodec")
.field("flags", &self.inner.flags)
.finish_non_exhaustive()
}
}
impl ClientCodec {
/// Create HTTP/1 codec.
///
/// `keepalive_enabled` controls how the response `Connection` header is generated.
pub fn new(config: ServiceConfig) -> Self {
let flags = if config.keep_alive_enabled() {
Flags::KEEPALIVE_ENABLED
let flags = if config.keep_alive().enabled() {
Flags::KEEP_ALIVE_ENABLED
} else {
Flags::empty()
};
ClientCodec {
inner: ClientCodecInner {
config,
decoder: decoder::MessageDecoder::default(),
payload: None,
version: Version::HTTP_11,
ctype: ConnectionType::Close,
conn_type: ConnectionType::Close,
flags,
encoder: encoder::MessageEncoder::default(),
@ -75,12 +86,12 @@ impl ClientCodec {
/// Check if request is upgrade
pub fn upgrade(&self) -> bool {
self.inner.ctype == ConnectionType::Upgrade
self.inner.conn_type == ConnectionType::Upgrade
}
/// Check if last response is keep-alive
pub fn keepalive(&self) -> bool {
self.inner.ctype == ConnectionType::KeepAlive
pub fn keep_alive(&self) -> bool {
self.inner.conn_type == ConnectionType::KeepAlive
}
/// Check last request's message type
@ -102,8 +113,8 @@ impl ClientCodec {
impl ClientPayloadCodec {
/// Check if last response is keep-alive
pub fn keepalive(&self) -> bool {
self.inner.ctype == ConnectionType::KeepAlive
pub fn keep_alive(&self) -> bool {
self.inner.conn_type == ConnectionType::KeepAlive
}
/// Transform payload codec to a message codec
@ -117,15 +128,18 @@ impl Decoder for ClientCodec {
type Error = ParseError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
debug_assert!(!self.inner.payload.is_some(), "Payload decoder is set");
debug_assert!(
self.inner.payload.is_none(),
"Payload decoder should not be set"
);
if let Some((req, payload)) = self.inner.decoder.decode(src)? {
if let Some(ctype) = req.ctype() {
if let Some(conn_type) = req.conn_type() {
// do not use peer's keep-alive
self.inner.ctype = if ctype == ConnectionType::KeepAlive {
self.inner.ctype
self.inner.conn_type = if conn_type == ConnectionType::KeepAlive {
self.inner.conn_type
} else {
ctype
conn_type
};
}
@ -190,9 +204,9 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
.set(Flags::HEAD, head.as_ref().method == Method::HEAD);
// connection status
inner.ctype = match head.as_ref().connection_type() {
inner.conn_type = match head.as_ref().connection_type() {
ConnectionType::KeepAlive => {
if inner.flags.contains(Flags::KEEPALIVE_ENABLED) {
if inner.flags.contains(Flags::KEEP_ALIVE_ENABLED) {
ConnectionType::KeepAlive
} else {
ConnectionType::Close
@ -209,7 +223,7 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
false,
inner.version,
length,
inner.ctype,
inner.conn_type,
&inner.config,
)?;
}
@ -223,15 +237,3 @@ impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
Ok(())
}
}
pub struct Writer<'a>(pub &'a mut BytesMut);
impl<'a> io::Write for Writer<'a> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}

View File

@ -5,21 +5,19 @@ use bitflags::bitflags;
use bytes::BytesMut;
use http::{Method, Version};
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
use super::{decoder, encoder};
use super::{Message, MessageType};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::error::ParseError;
use crate::message::ConnectionType;
use crate::request::Request;
use crate::response::Response;
use super::{
decoder::{self, PayloadDecoder, PayloadItem, PayloadType},
encoder, Message, MessageType,
};
use crate::{
body::BodySize, error::ParseError, ConnectionType, Request, Response, ServiceConfig,
};
bitflags! {
struct Flags: u8 {
const HEAD = 0b0000_0001;
const KEEPALIVE_ENABLED = 0b0000_0010;
const STREAM = 0b0000_0100;
const HEAD = 0b0000_0001;
const KEEP_ALIVE_ENABLED = 0b0000_0010;
const STREAM = 0b0000_0100;
}
}
@ -29,7 +27,7 @@ pub struct Codec {
decoder: decoder::MessageDecoder<Request>,
payload: Option<PayloadDecoder>,
version: Version,
ctype: ConnectionType,
conn_type: ConnectionType,
// encoder part
flags: Flags,
@ -44,7 +42,9 @@ impl Default for Codec {
impl fmt::Debug for Codec {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "h1::Codec({:?})", self.flags)
f.debug_struct("h1::Codec")
.field("flags", &self.flags)
.finish_non_exhaustive()
}
}
@ -53,8 +53,8 @@ impl Codec {
///
/// `keepalive_enabled` controls how the response `Connection` header is generated.
pub fn new(config: ServiceConfig) -> Self {
let flags = if config.keep_alive_enabled() {
Flags::KEEPALIVE_ENABLED
let flags = if config.keep_alive().enabled() {
Flags::KEEP_ALIVE_ENABLED
} else {
Flags::empty()
};
@ -65,7 +65,7 @@ impl Codec {
decoder: decoder::MessageDecoder::default(),
payload: None,
version: Version::HTTP_11,
ctype: ConnectionType::Close,
conn_type: ConnectionType::Close,
encoder: encoder::MessageEncoder::default(),
}
}
@ -73,19 +73,19 @@ impl Codec {
/// Check if request is upgrade.
#[inline]
pub fn upgrade(&self) -> bool {
self.ctype == ConnectionType::Upgrade
self.conn_type == ConnectionType::Upgrade
}
/// Check if last response is keep-alive.
#[inline]
pub fn keepalive(&self) -> bool {
self.ctype == ConnectionType::KeepAlive
pub fn keep_alive(&self) -> bool {
self.conn_type == ConnectionType::KeepAlive
}
/// Check if keep-alive enabled on server level.
#[inline]
pub fn keepalive_enabled(&self) -> bool {
self.flags.contains(Flags::KEEPALIVE_ENABLED)
pub fn keep_alive_enabled(&self) -> bool {
self.flags.contains(Flags::KEEP_ALIVE_ENABLED)
}
/// Check last request's message type.
@ -111,8 +111,8 @@ impl Decoder for Codec {
type Error = ParseError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if self.payload.is_some() {
Ok(match self.payload.as_mut().unwrap().decode(src)? {
if let Some(ref mut payload) = self.payload {
Ok(match payload.decode(src)? {
Some(PayloadItem::Chunk(chunk)) => Some(Message::Chunk(Some(chunk))),
Some(PayloadItem::Eof) => {
self.payload.take();
@ -124,12 +124,14 @@ impl Decoder for Codec {
let head = req.head();
self.flags.set(Flags::HEAD, head.method == Method::HEAD);
self.version = head.version;
self.ctype = head.connection_type();
if self.ctype == ConnectionType::KeepAlive
&& !self.flags.contains(Flags::KEEPALIVE_ENABLED)
self.conn_type = head.connection_type();
if self.conn_type == ConnectionType::KeepAlive
&& !self.flags.contains(Flags::KEEP_ALIVE_ENABLED)
{
self.ctype = ConnectionType::Close
self.conn_type = ConnectionType::Close
}
match payload {
PayloadType::None => self.payload = None,
PayloadType::Payload(pl) => self.payload = Some(pl),
@ -159,14 +161,14 @@ impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
res.head_mut().version = self.version;
// connection status
self.ctype = if let Some(ct) = res.head().ctype() {
self.conn_type = if let Some(ct) = res.head().conn_type() {
if ct == ConnectionType::KeepAlive {
self.ctype
self.conn_type
} else {
ct
}
} else {
self.ctype
self.conn_type
};
// encode message
@ -177,18 +179,20 @@ impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
self.flags.contains(Flags::STREAM),
self.version,
length,
self.ctype,
self.conn_type,
&self.config,
)?;
// self.headers_size = (dst.len() - len) as u32;
}
Message::Chunk(Some(bytes)) => {
self.encoder.encode_chunk(bytes.as_ref(), dst)?;
}
Message::Chunk(None) => {
self.encoder.encode_eof(dst)?;
}
}
Ok(())
}
}
@ -199,10 +203,10 @@ mod tests {
use http::Method;
use super::*;
use crate::httpmessage::HttpMessage;
use crate::HttpMessage as _;
#[test]
fn test_http_request_chunked_payload_and_next_message() {
#[actix_rt::test]
async fn test_http_request_chunked_payload_and_next_message() {
let mut codec = Codec::default();
let mut buf = BytesMut::from(

View File

@ -1,20 +1,17 @@
use std::convert::TryFrom;
use std::io;
use std::marker::PhantomData;
use std::task::Poll;
use std::{convert::TryFrom, io, marker::PhantomData, mem::MaybeUninit, task::Poll};
use actix_codec::Decoder;
use bytes::{Buf, Bytes, BytesMut};
use http::header::{HeaderName, HeaderValue};
use http::{header, Method, StatusCode, Uri, Version};
use bytes::{Bytes, BytesMut};
use http::{
header::{self, HeaderName, HeaderValue},
Method, StatusCode, Uri, Version,
};
use log::{debug, error, trace};
use crate::error::ParseError;
use crate::header::HeaderMap;
use crate::message::{ConnectionType, ResponseHead};
use crate::request::Request;
use super::chunked::ChunkedState;
use crate::{error::ParseError, header::HeaderMap, ConnectionType, Request, ResponseHead};
const MAX_BUFFER_SIZE: usize = 131_072;
pub(crate) const MAX_BUFFER_SIZE: usize = 131_072;
const MAX_HEADERS: usize = 96;
/// Incoming message decoder
@ -50,7 +47,7 @@ pub(crate) enum PayloadLength {
}
pub(crate) trait MessageType: Sized {
fn set_connection_type(&mut self, ctype: Option<ConnectionType>);
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>);
fn set_expect(&mut self);
@ -67,14 +64,14 @@ pub(crate) trait MessageType: Sized {
let mut has_upgrade_websocket = false;
let mut expect = false;
let mut chunked = false;
let mut seen_te = false;
let mut content_length = None;
{
let headers = self.headers_mut();
for idx in raw_headers.iter() {
let name =
HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap();
let name = HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap();
// SAFETY: httparse already checks header value is only visible ASCII bytes
// from_maybe_shared_unchecked contains debug assertions so they are omitted here
@ -85,8 +82,17 @@ pub(crate) trait MessageType: Sized {
};
match name {
header::CONTENT_LENGTH => {
if let Ok(s) = value.to_str() {
header::CONTENT_LENGTH if content_length.is_some() => {
debug!("multiple Content-Length");
return Err(ParseError::Header);
}
header::CONTENT_LENGTH => match value.to_str() {
Ok(s) if s.trim().starts_with('+') => {
debug!("illegal Content-Length: {:?}", s);
return Err(ParseError::Header);
}
Ok(s) => {
if let Ok(len) = s.parse::<u64>() {
if len != 0 {
content_length = Some(len);
@ -95,22 +101,38 @@ pub(crate) trait MessageType: Sized {
debug!("illegal Content-Length: {:?}", s);
return Err(ParseError::Header);
}
} else {
}
Err(_) => {
debug!("illegal Content-Length: {:?}", value);
return Err(ParseError::Header);
}
}
},
// transfer-encoding
header::TRANSFER_ENCODING if seen_te => {
debug!("multiple Transfer-Encoding not allowed");
return Err(ParseError::Header);
}
header::TRANSFER_ENCODING => {
if let Ok(s) = value.to_str().map(|s| s.trim()) {
chunked = s.eq_ignore_ascii_case("chunked");
seen_te = true;
if let Ok(s) = value.to_str().map(str::trim) {
if s.eq_ignore_ascii_case("chunked") {
chunked = true;
} else if s.eq_ignore_ascii_case("identity") {
// allow silently since multiple TE headers are already checked
} else {
debug!("illegal Transfer-Encoding: {:?}", s);
return Err(ParseError::Header);
}
} else {
return Err(ParseError::Header);
}
}
// connection keep-alive state
header::CONNECTION => {
ka = if let Ok(conn) = value.to_str().map(|conn| conn.trim()) {
ka = if let Ok(conn) = value.to_str().map(str::trim) {
if conn.eq_ignore_ascii_case("keep-alive") {
Some(ConnectionType::KeepAlive)
} else if conn.eq_ignore_ascii_case("close") {
@ -125,7 +147,7 @@ pub(crate) trait MessageType: Sized {
};
}
header::UPGRADE => {
if let Ok(val) = value.to_str().map(|val| val.trim()) {
if let Ok(val) = value.to_str().map(str::trim) {
if val.eq_ignore_ascii_case("websocket") {
has_upgrade_websocket = true;
}
@ -137,7 +159,7 @@ pub(crate) trait MessageType: Sized {
expect = true;
}
}
_ => (),
_ => {}
}
headers.append(name, value);
@ -148,7 +170,7 @@ pub(crate) trait MessageType: Sized {
self.set_expect()
}
// https://tools.ietf.org/html/rfc7230#section-3.3.3
// https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.3
if chunked {
// Chunked encoding
Ok(PayloadLength::Payload(PayloadType::Payload(
@ -168,8 +190,8 @@ pub(crate) trait MessageType: Sized {
}
impl MessageType for Request {
fn set_connection_type(&mut self, ctype: Option<ConnectionType>) {
if let Some(ctype) = ctype {
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>) {
if let Some(ctype) = conn_type {
self.head_mut().set_connection_type(ctype);
}
}
@ -186,10 +208,18 @@ impl MessageType for Request {
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
let (len, method, uri, ver, h_len) = {
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
// SAFETY:
// Create an uninitialized array of `MaybeUninit`. The `assume_init` is safe because the
// type we are claiming to have initialized here is a bunch of `MaybeUninit`s, which
// do not require initialization.
let mut parsed = unsafe {
MaybeUninit::<[MaybeUninit<httparse::Header<'_>>; MAX_HEADERS]>::uninit()
.assume_init()
};
let mut req = httparse::Request::new(&mut parsed);
match req.parse(src)? {
let mut req = httparse::Request::new(&mut []);
match req.parse_with_uninit_headers(src, &mut parsed)? {
httparse::Status::Complete(len) => {
let method = Method::from_bytes(req.method.unwrap().as_bytes())
.map_err(|_| ParseError::Method)?;
@ -203,7 +233,16 @@ impl MessageType for Request {
(len, method, uri, version, req.headers.len())
}
httparse::Status::Partial => return Ok(None),
httparse::Status::Partial => {
return if src.len() >= MAX_BUFFER_SIZE {
trace!("MAX_BUFFER_SIZE unprocessed data reached, closing");
Err(ParseError::TooLarge)
} else {
// Return None to signal that more reads are needed to parse the request
Ok(None)
};
}
}
};
@ -216,15 +255,12 @@ impl MessageType for Request {
let decoder = match length {
PayloadLength::Payload(pl) => pl,
PayloadLength::UpgradeWebSocket => {
// upgrade(websocket)
// upgrade (WebSocket)
PayloadType::Stream(PayloadDecoder::eof())
}
PayloadLength::None => {
if method == Method::CONNECT {
PayloadType::Stream(PayloadDecoder::eof())
} else if src.len() >= MAX_BUFFER_SIZE {
trace!("MAX_BUFFER_SIZE unprocessed data reached, closing");
return Err(ParseError::TooLarge);
} else {
PayloadType::None
}
@ -241,8 +277,8 @@ impl MessageType for Request {
}
impl MessageType for ResponseHead {
fn set_connection_type(&mut self, ctype: Option<ConnectionType>) {
if let Some(ctype) = ctype {
fn set_connection_type(&mut self, conn_type: Option<ConnectionType>) {
if let Some(ctype) = conn_type {
ResponseHead::set_connection_type(self, ctype);
}
}
@ -273,7 +309,14 @@ impl MessageType for ResponseHead {
(len, version, status, res.headers.len())
}
httparse::Status::Partial => return Ok(None),
httparse::Status::Partial => {
return if src.len() >= MAX_BUFFER_SIZE {
error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
Err(ParseError::TooLarge)
} else {
Ok(None)
}
}
}
};
@ -289,9 +332,6 @@ impl MessageType for ResponseHead {
} else if status == StatusCode::SWITCHING_PROTOCOLS {
// switching protocol or connect
PayloadType::Stream(PayloadDecoder::eof())
} else if src.len() >= MAX_BUFFER_SIZE {
error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
return Err(ParseError::TooLarge);
} else {
// for HTTP/1.0 read to eof and close connection
if msg.version == Version::HTTP_10 {
@ -342,34 +382,36 @@ impl HeaderIndex {
}
#[derive(Debug, Clone, PartialEq)]
/// Http payload item
/// Chunk type yielded while decoding a payload.
pub enum PayloadItem {
Chunk(Bytes),
Eof,
}
/// Decoders to handle different Transfer-Encodings.
/// Decoder that can handle different payload types.
///
/// If a message body does not include a Transfer-Encoding, it *should*
/// include a Content-Length header.
/// If a message body does not use `Transfer-Encoding`, it should include a `Content-Length`.
#[derive(Debug, Clone, PartialEq)]
pub struct PayloadDecoder {
kind: Kind,
}
impl PayloadDecoder {
/// Constructs a fixed-length payload decoder.
pub fn length(x: u64) -> PayloadDecoder {
PayloadDecoder {
kind: Kind::Length(x),
}
}
/// Constructs a chunked encoding decoder.
pub fn chunked() -> PayloadDecoder {
PayloadDecoder {
kind: Kind::Chunked(ChunkedState::Size, 0),
}
}
/// Creates a decoder that yields chunks until the stream returns EOF.
pub fn eof() -> PayloadDecoder {
PayloadDecoder { kind: Kind::Eof }
}
@ -377,40 +419,27 @@ impl PayloadDecoder {
#[derive(Debug, Clone, PartialEq)]
enum Kind {
/// A Reader used when a Content-Length header is passed with a positive
/// integer.
/// A reader used when a `Content-Length` header is passed with a positive integer.
Length(u64),
/// A Reader used when Transfer-Encoding is `chunked`.
Chunked(ChunkedState, u64),
/// A Reader used for responses that don't indicate a length or chunked.
///
/// Note: This should only be used for `Response`s. It is illegal for a
/// `Request` to be made with both `Content-Length` and
/// `Transfer-Encoding: chunked` missing, as explained in the spec:
///
/// > If a Transfer-Encoding header field is present in a response and
/// > the chunked transfer coding is not the final encoding, the
/// > message body length is determined by reading the connection until
/// > it is closed by the server. If a Transfer-Encoding header field
/// > is present in a request and the chunked transfer coding is not
/// > the final encoding, the message body length cannot be determined
/// > reliably; the server MUST respond with the 400 (Bad Request)
/// > status code and then close the connection.
Eof,
}
#[derive(Debug, PartialEq, Clone)]
enum ChunkedState {
Size,
SizeLws,
Extension,
SizeLf,
Body,
BodyCr,
BodyLf,
EndCr,
EndLf,
End,
/// A reader used when `Transfer-Encoding` is `chunked`.
Chunked(ChunkedState, u64),
/// A reader used for responses that don't indicate a length or chunked.
///
/// Note: This should only be used for `Response`s. It is illegal for a `Request` to be made
/// with both `Content-Length` and `Transfer-Encoding: chunked` missing, as explained
/// in [RFC 7230 §3.3.3]:
///
/// > If a Transfer-Encoding header field is present in a response and the chunked transfer
/// > coding is not the final encoding, the message body length is determined by reading the
/// > connection until it is closed by the server. If a Transfer-Encoding header field is
/// > present in a request and the chunked transfer coding is not the final encoding, the
/// > message body length cannot be determined reliably; the server MUST respond with the 400
/// > (Bad Request) status code and then close the connection.
///
/// [RFC 7230 §3.3.3]: https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.3
Eof,
}
impl Decoder for PayloadDecoder {
@ -439,27 +468,33 @@ impl Decoder for PayloadDecoder {
Ok(Some(PayloadItem::Chunk(buf)))
}
}
Kind::Chunked(ref mut state, ref mut size) => {
loop {
let mut buf = None;
// advances the chunked state
*state = match state.step(src, size, &mut buf) {
Poll::Pending => return Ok(None),
Poll::Ready(Ok(state)) => state,
Poll::Ready(Err(e)) => return Err(e),
};
if *state == ChunkedState::End {
trace!("End of chunked stream");
return Ok(Some(PayloadItem::Eof));
}
if let Some(buf) = buf {
return Ok(Some(PayloadItem::Chunk(buf)));
}
if src.is_empty() {
return Ok(None);
}
}
}
Kind::Eof => {
if src.is_empty() {
Ok(None)
@ -471,201 +506,40 @@ impl Decoder for PayloadDecoder {
}
}
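// Pops one byte from the buffer; if the buffer is empty, returns `Poll::Pending` from the
// enclosing function so the chunked state machine is re-polled once more data arrives.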
macro_rules! byte (
($rdr:ident) => ({
if $rdr.len() > 0 {
let b = $rdr[0];
$rdr.advance(1);
b
} else {
return Poll::Pending
}
})
);
impl ChunkedState {
fn step(
&self,
body: &mut BytesMut,
size: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
use self::ChunkedState::*;
match *self {
Size => ChunkedState::read_size(body, size),
SizeLws => ChunkedState::read_size_lws(body),
Extension => ChunkedState::read_extension(body),
SizeLf => ChunkedState::read_size_lf(body, size),
Body => ChunkedState::read_body(body, size, buf),
BodyCr => ChunkedState::read_body_cr(body),
BodyLf => ChunkedState::read_body_lf(body),
EndCr => ChunkedState::read_end_cr(body),
EndLf => ChunkedState::read_end_lf(body),
End => Poll::Ready(Ok(ChunkedState::End)),
}
}
fn read_size(
rdr: &mut BytesMut,
size: &mut u64,
) -> Poll<Result<ChunkedState, io::Error>> {
let radix = 16;
match byte!(rdr) {
b @ b'0'..=b'9' => {
*size *= radix;
*size += u64::from(b - b'0');
}
b @ b'a'..=b'f' => {
*size *= radix;
*size += u64::from(b + 10 - b'a');
}
b @ b'A'..=b'F' => {
*size *= radix;
*size += u64::from(b + 10 - b'A');
}
b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => return Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Invalid Size",
)));
}
}
Poll::Ready(Ok(ChunkedState::Size))
}
fn read_size_lws(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
trace!("read_size_lws");
match byte!(rdr) {
// LWS can follow the chunk size, but no more digits can come
b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size linear white space",
))),
}
}
fn read_extension(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions
}
}
fn read_size_lf(
rdr: &mut BytesMut,
size: &mut u64,
) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' if *size > 0 => Poll::Ready(Ok(ChunkedState::Body)),
b'\n' if *size == 0 => Poll::Ready(Ok(ChunkedState::EndCr)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size LF",
))),
}
}
fn read_body(
rdr: &mut BytesMut,
rem: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<Result<ChunkedState, io::Error>> {
trace!("Chunked read, remaining={:?}", rem);
let len = rdr.len() as u64;
if len == 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
let slice;
if *rem > len {
slice = rdr.split().freeze();
*rem -= len;
} else {
slice = rdr.split_to(*rem as usize).freeze();
*rem = 0;
}
*buf = Some(slice);
if *rem > 0 {
Poll::Ready(Ok(ChunkedState::Body))
} else {
Poll::Ready(Ok(ChunkedState::BodyCr))
}
}
}
fn read_body_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body CR",
))),
}
}
fn read_body_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::Size)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body LF",
))),
}
}
fn read_end_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end CR",
))),
}
}
fn read_end_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Poll::Ready(Ok(ChunkedState::End)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end LF",
))),
}
}
}
#[cfg(test)]
mod tests {
use bytes::{Bytes, BytesMut};
use http::{Method, Version};
use super::*;
use crate::error::ParseError;
use crate::http::header::{HeaderName, SET_COOKIE};
use crate::httpmessage::HttpMessage;
use crate::{
error::ParseError,
header::{HeaderName, SET_COOKIE},
HttpMessage as _,
};
impl PayloadType {
fn unwrap(self) -> PayloadDecoder {
pub(crate) fn unwrap(self) -> PayloadDecoder {
match self {
PayloadType::Payload(pl) => pl,
_ => panic!(),
}
}
fn is_unhandled(&self) -> bool {
pub(crate) fn is_unhandled(&self) -> bool {
matches!(self, PayloadType::Stream(_))
}
}
impl PayloadItem {
fn chunk(self) -> Bytes {
pub(crate) fn chunk(self) -> Bytes {
match self {
PayloadItem::Chunk(chunk) => chunk,
_ => panic!("error"),
}
}
fn eof(&self) -> bool {
pub(crate) fn eof(&self) -> bool {
matches!(*self, PayloadItem::Eof)
}
}
@ -685,7 +559,7 @@ mod tests {
match MessageDecoder::<Request>::default().decode($e) {
Err(err) => match err {
ParseError::Io(_) => unreachable!("Parse error expected"),
_ => (),
_ => {}
},
_ => unreachable!("Error expected"),
}
@ -734,8 +608,7 @@ mod tests {
#[test]
fn test_parse_body() {
let mut buf =
BytesMut::from("GET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
let mut buf = BytesMut::from("GET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
@ -751,8 +624,7 @@ mod tests {
#[test]
fn test_parse_body_crlf() {
let mut buf =
BytesMut::from("\r\nGET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
let mut buf = BytesMut::from("\r\nGET /test HTTP/1.1\r\nContent-Length: 4\r\n\r\nbody");
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
@ -821,8 +693,8 @@ mod tests {
.get_all(SET_COOKIE)
.map(|v| v.to_str().unwrap().to_owned())
.collect();
assert_eq!(val[1], "c1=cookie1");
assert_eq!(val[0], "c2=cookie2");
assert_eq!(val[0], "c1=cookie1");
assert_eq!(val[1], "c2=cookie2");
}
#[test]
@ -958,34 +830,6 @@ mod tests {
assert!(req.upgrade());
}
#[test]
fn test_request_chunked() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let req = parse_ready!(&mut buf);
if let Ok(val) = req.chunked() {
assert!(val);
} else {
unreachable!("Error");
}
// intentional typo in "chunked"
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chnked\r\n\r\n",
);
let req = parse_ready!(&mut buf);
if let Ok(val) = req.chunked() {
assert!(!val);
} else {
unreachable!("Error");
}
}
#[test]
fn test_headers_content_length_err_1() {
let mut buf = BytesMut::from(
@ -1103,128 +947,9 @@ mod tests {
expect_parse_err!(&mut buf);
}
#[test]
fn test_http_request_chunked_payload() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n");
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"data"
);
assert_eq!(
pl.decode(&mut buf).unwrap().unwrap().chunk().as_ref(),
b"line"
);
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn test_http_request_chunked_payload_and_next_message() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(
b"4\r\ndata\r\n4\r\nline\r\n0\r\n\r\n\
POST /test2 HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n"
.iter(),
);
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"line");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
let (req, _) = reader.decode(&mut buf).unwrap().unwrap();
assert!(req.chunked().unwrap());
assert_eq!(*req.method(), Method::POST);
assert!(req.chunked().unwrap());
}
#[test]
fn test_http_request_chunked_payload_chunks() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(req.chunked().unwrap());
buf.extend(b"4\r\n1111\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"1111");
buf.extend(b"4\r\ndata\r");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"data");
buf.extend(b"\n4");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\n");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"li");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"li");
//trailers
//buf.feed_data("test: test\r\n");
//not_ready!(reader.parse(&mut buf, &mut readbuf));
buf.extend(b"ne\r\n0\r\n");
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(msg.chunk().as_ref(), b"ne");
assert!(pl.decode(&mut buf).unwrap().is_none());
buf.extend(b"\r\n");
assert!(pl.decode(&mut buf).unwrap().unwrap().eof());
}
#[test]
fn test_parse_chunked_payload_chunk_extension() {
let mut buf = BytesMut::from(
&"GET /test HTTP/1.1\r\n\
transfer-encoding: chunked\r\n\r\n"[..],
);
let mut reader = MessageDecoder::<Request>::default();
let (msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
assert!(msg.chunked().unwrap());
buf.extend(b"4;test\r\ndata\r\n4\r\nline\r\n0\r\n\r\n"); // test: test\r\n\r\n")
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"data"));
let chunk = pl.decode(&mut buf).unwrap().unwrap().chunk();
assert_eq!(chunk, Bytes::from_static(b"line"));
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
}
#[test]
fn test_response_http10_read_until_eof() {
let mut buf = BytesMut::from(&"HTTP/1.0 200 Ok\r\n\r\ntest data"[..]);
let mut buf = BytesMut::from("HTTP/1.0 200 Ok\r\n\r\ntest data");
let mut reader = MessageDecoder::<ResponseHead>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
@ -1233,4 +958,84 @@ mod tests {
let chunk = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"test data")));
}
#[test]
fn hrs_multiple_content_length() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: 4\r\n\
Content-Length: 2\r\n\
\r\n\
abcd",
);
expect_parse_err!(&mut buf);
}
#[test]
fn hrs_content_length_plus() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: +3\r\n\
\r\n\
000",
);
expect_parse_err!(&mut buf);
}
#[test]
fn hrs_unknown_transfer_encoding() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Transfer-Encoding: JUNK\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
5\r\n\
hello\r\n\
0",
);
expect_parse_err!(&mut buf);
}
#[test]
fn hrs_multiple_transfer_encoding() {
let mut buf = BytesMut::from(
"GET / HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: 51\r\n\
Transfer-Encoding: identity\r\n\
Transfer-Encoding: chunked\r\n\
\r\n\
0\r\n\
\r\n\
GET /forbidden HTTP/1.1\r\n\
Host: example.com\r\n\r\n",
);
expect_parse_err!(&mut buf);
}
#[test]
fn transfer_encoding_agrees() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
Host: example.com\r\n\
Content-Length: 3\r\n\
Transfer-Encoding: identity\r\n\
\r\n\
0\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let chunk = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"0\r\n")));
}
}

File diff suppressed because it is too large


@ -0,0 +1,973 @@
use std::{future::Future, str, task::Poll, time::Duration};
use actix_rt::{pin, time::sleep};
use actix_service::fn_service;
use actix_utils::future::{ready, Ready};
use bytes::Bytes;
use futures_util::future::lazy;
use actix_codec::Framed;
use actix_service::Service;
use bytes::{Buf, BytesMut};
use super::dispatcher::{Dispatcher, DispatcherState, DispatcherStateProj, Flags};
use crate::{
body::MessageBody,
config::ServiceConfig,
h1::{Codec, ExpectHandler, UpgradeHandler},
service::HttpFlow,
test::{TestBuffer, TestSeqBuffer},
Error, HttpMessage, KeepAlive, Method, OnConnectData, Request, Response, StatusCode,
};
fn find_slice(haystack: &[u8], needle: &[u8], from: usize) -> Option<usize> {
memchr::memmem::find(&haystack[from..], needle)
}
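// Overwrites the value of every `date` header in the buffer with a fixed timestamp so
// responses can be compared byte-for-byte in the assertions below.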
fn stabilize_date_header(payload: &mut [u8]) {
let mut from = 0;
while let Some(pos) = find_slice(payload, b"date", from) {
payload[(from + pos)..(from + pos + 35)]
.copy_from_slice(b"date: Thu, 01 Jan 1970 12:34:56 UTC");
from += 35;
}
}
fn ok_service() -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
status_service(StatusCode::OK)
}
fn status_service(
status: StatusCode,
) -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
fn_service(move |_req: Request| ready(Ok::<_, Error>(Response::new(status))))
}
fn echo_path_service(
) -> impl Service<Request, Response = Response<impl MessageBody>, Error = Error> {
fn_service(|req: Request| {
let path = req.path().as_bytes();
ready(Ok::<_, Error>(
Response::ok().set_body(Bytes::copy_from_slice(path)),
))
})
}
fn drop_payload_service(
) -> impl Service<Request, Response = Response<&'static str>, Error = Error> {
fn_service(|mut req: Request| async move {
let _ = req.take_payload();
Ok::<_, Error>(Response::with_body(StatusCode::OK, "payload dropped"))
})
}
fn echo_payload_service() -> impl Service<Request, Response = Response<Bytes>, Error = Error> {
fn_service(|mut req: Request| {
Box::pin(async move {
use futures_util::stream::StreamExt as _;
let mut pl = req.take_payload();
let mut body = BytesMut::new();
while let Some(chunk) = pl.next().await {
body.extend_from_slice(chunk.unwrap().chunk())
}
Ok::<_, Error>(Response::ok().set_body(body.freeze()))
})
})
}
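// The tests below drive the dispatcher by hand: `pin!` pins it on the stack, `lazy`
// supplies a task `Context` so it can be polled step by step, and assertions are made on
// `poll_count` and on the raw bytes left in the test buffer.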
#[actix_rt::test]
async fn late_request() {
let mut buf = TestBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(ok_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Ready(_) => panic!("first poll should not be ready"),
Poll::Pending => {}
}
// polls: initial
assert_eq!(h1.poll_count, 1);
buf.extend_read_buf("GET /abcd HTTP/1.1\r\nConnection: close\r\n\r\n");
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("second poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial pending => handle req => shutdown
assert_eq!(h1.poll_count, 3);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 0\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn oneshot_connection() {
let buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 2);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = http_msg(
r"
HTTP/1.1 200 OK
content-length: 5
connection: close
date: Thu, 01 Jan 1970 12:34:56 UTC
/abcd
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
})
.await;
}
#[actix_rt::test]
async fn keep_alive_timeout() {
let buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
let cfg = ServiceConfig::new(
KeepAlive::Timeout(Duration::from_millis(200)),
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
assert!(
h1.as_mut().poll(cx).is_pending(),
"keep-alive should prevent poll from resolving"
);
// polls: initial
assert_eq!(h1.poll_count, 1);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
// sleep slightly longer than keep-alive timeout
sleep(Duration::from_millis(250)).await;
lazy(|cx| {
assert!(
h1.as_mut().poll(cx).is_ready(),
"keep-alive should have resolved",
);
// polls: initial => keep-alive wake-up shutdown
assert_eq!(h1.poll_count, 2);
if let DispatcherStateProj::Normal { inner } = h1.project().inner.project() {
// connection closed
assert!(inner.flags.contains(Flags::SHUTDOWN));
assert!(inner.flags.contains(Flags::WRITE_DISCONNECT));
// and nothing added to write buffer
assert!(buf.write_buf_slice().is_empty());
}
})
.await;
}
#[actix_rt::test]
async fn keep_alive_follow_up_req() {
let mut buf = TestBuffer::new("GET /abcd HTTP/1.1\r\n\r\n");
let cfg = ServiceConfig::new(
KeepAlive::Timeout(Duration::from_millis(500)),
Duration::from_millis(100),
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
assert!(
h1.as_mut().poll(cx).is_pending(),
"keep-alive should prevent poll from resolving"
);
// polls: initial
assert_eq!(h1.poll_count, 1);
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
// sleep for less than KA timeout
sleep(Duration::from_millis(100)).await;
lazy(|cx| {
assert!(
h1.as_mut().poll(cx).is_pending(),
"keep-alive should not have resolved dispatcher yet",
);
// polls: initial => manual
assert_eq!(h1.poll_count, 2);
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
// connection not closed
assert!(!inner.flags.contains(Flags::SHUTDOWN));
assert!(!inner.flags.contains(Flags::WRITE_DISCONNECT));
// and nothing added to write buffer
assert!(buf.write_buf_slice().is_empty());
}
})
.await;
lazy(|cx| {
buf.extend_read_buf(
"\
GET /efg HTTP/1.1\r\n\
Connection: close\r\n\
\r\n\r\n",
);
assert!(
h1.as_mut().poll(cx).is_ready(),
"connection close header should override keep-alive setting",
);
// polls: initial => manual => follow-up req => shutdown
assert_eq!(h1.poll_count, 4);
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
// connection closed
assert!(inner.flags.contains(Flags::SHUTDOWN));
assert!(!inner.flags.contains(Flags::WRITE_DISCONNECT));
}
let mut res = buf.take_write_buf().to_vec();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 4\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/efg\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn req_parse_err() {
lazy(|cx| {
let buf = TestBuffer::new("GET /test HTTP/1\r\n\r\n");
let services = HttpFlow::new(ok_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
ServiceConfig::default(),
None,
OnConnectData::default(),
);
pin!(h1);
match h1.as_mut().poll(cx) {
Poll::Pending => panic!(),
Poll::Ready(res) => assert!(res.is_err()),
}
if let DispatcherStateProj::Normal { inner } = h1.project().inner.project() {
assert!(inner.flags.contains(Flags::READ_DISCONNECT));
assert_eq!(
&buf.write_buf_slice()[..26],
b"HTTP/1.1 400 Bad Request\r\n"
);
}
})
.await;
}
#[actix_rt::test]
async fn pipelining_ok_then_ok() {
lazy(|cx| {
let buf = TestBuffer::new(
"\
GET /abcd HTTP/1.1\r\n\r\n\
GET /def HTTP/1.1\r\n\r\n\
",
);
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(1),
Duration::from_millis(1),
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_ok()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 2);
let mut res = buf.write_buf_slice_mut();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
HTTP/1.1 200 OK\r\n\
content-length: 4\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/def\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn pipelining_ok_then_bad() {
lazy(|cx| {
let buf = TestBuffer::new(
"\
GET /abcd HTTP/1.1\r\n\r\n\
GET /def HTTP/1\r\n\r\n\
",
);
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::from_millis(1),
Duration::from_millis(1),
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
pin!(h1);
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
match h1.as_mut().poll(cx) {
Poll::Pending => panic!("first poll should not be pending"),
Poll::Ready(res) => assert!(res.is_err()),
}
// polls: initial => shutdown
assert_eq!(h1.poll_count, 1);
let mut res = buf.write_buf_slice_mut();
stabilize_date_header(&mut res);
let res = &res[..];
let exp = b"\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
/abcd\
HTTP/1.1 400 Bad Request\r\n\
content-length: 0\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\r\n\
";
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(exp)
);
})
.await;
}
#[actix_rt::test]
async fn expect_handling() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::ZERO,
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_payload_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
buf.extend_read_buf(
"\
POST /upload HTTP/1.1\r\n\
Content-Length: 5\r\n\
Expect: 100-continue\r\n\
\r\n\
",
);
pin!(h1);
assert!(h1.as_mut().poll(cx).is_pending());
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
// polls: manual
assert_eq!(h1.poll_count, 1);
if let DispatcherState::Normal { ref inner } = h1.inner {
let io = inner.io.as_ref().unwrap();
let res = &io.write_buf()[..];
assert_eq!(
str::from_utf8(res).unwrap(),
"HTTP/1.1 100 Continue\r\n\r\n"
);
}
buf.extend_read_buf("12345");
assert!(h1.as_mut().poll(cx).is_ready());
// polls: manual manual shutdown
assert_eq!(h1.poll_count, 3);
if let DispatcherState::Normal { ref inner } = h1.inner {
let io = inner.io.as_ref().unwrap();
let mut res = (&io.write_buf()[..]).to_owned();
stabilize_date_header(&mut res);
assert_eq!(
str::from_utf8(&res).unwrap(),
"\
HTTP/1.1 100 Continue\r\n\
\r\n\
HTTP/1.1 200 OK\r\n\
content-length: 5\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
\r\n\
12345\
"
);
}
})
.await;
}
#[actix_rt::test]
async fn expect_eager() {
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::ZERO,
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(echo_path_service(), ExpectHandler, None);
let h1 = Dispatcher::<_, _, _, _, UpgradeHandler>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
buf.extend_read_buf(
"\
POST /upload HTTP/1.1\r\n\
Content-Length: 5\r\n\
Expect: 100-continue\r\n\
\r\n\
",
);
pin!(h1);
assert!(h1.as_mut().poll(cx).is_ready());
assert!(matches!(&h1.inner, DispatcherState::Normal { .. }));
// polls: manual shutdown
assert_eq!(h1.poll_count, 2);
if let DispatcherState::Normal { ref inner } = h1.inner {
let io = inner.io.as_ref().unwrap();
let mut res = (&io.write_buf()[..]).to_owned();
stabilize_date_header(&mut res);
// Despite the `Content-Length` header, and even though the request payload has not
// been sent, this test expects a complete service response since the payload is not
// used at all. The service passed to the dispatcher is a path echo and doesn't
// consume any payload bytes.
assert_eq!(
str::from_utf8(&res).unwrap(),
"\
HTTP/1.1 100 Continue\r\n\
\r\n\
HTTP/1.1 200 OK\r\n\
content-length: 7\r\n\
connection: close\r\n\
date: Thu, 01 Jan 1970 12:34:56 UTC\r\n\
\r\n\
/upload\
"
);
}
})
.await;
}
#[actix_rt::test]
async fn upgrade_handling() {
struct TestUpgrade;
impl<T> Service<(Request, Framed<T, Codec>)> for TestUpgrade {
type Response = ();
type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>;
actix_service::always_ready!();
fn call(&self, (req, _framed): (Request, Framed<T, Codec>)) -> Self::Future {
assert_eq!(req.method(), Method::GET);
assert!(req.upgrade());
assert_eq!(req.headers().get("upgrade").unwrap(), "websocket");
ready(Ok(()))
}
}
lazy(|cx| {
let mut buf = TestSeqBuffer::empty();
let cfg = ServiceConfig::new(
KeepAlive::Disabled,
Duration::ZERO,
Duration::ZERO,
false,
None,
);
let services = HttpFlow::new(ok_service(), ExpectHandler, Some(TestUpgrade));
let h1 = Dispatcher::<_, _, _, _, TestUpgrade>::new(
buf.clone(),
services,
cfg,
None,
OnConnectData::default(),
);
buf.extend_read_buf(
"\
GET /ws HTTP/1.1\r\n\
Connection: Upgrade\r\n\
Upgrade: websocket\r\n\
\r\n\
",
);
pin!(h1);
assert!(h1.as_mut().poll(cx).is_ready());
assert!(matches!(&h1.inner, DispatcherState::Upgrade { .. }));
// polls: manual shutdown
assert_eq!(h1.poll_count, 2);
})
.await;
}
#[actix_rt::test]
async fn handler_drop_payload() {
let _ = env_logger::try_init();
let mut buf = TestBuffer::new(http_msg(
r"
POST /drop-payload HTTP/1.1
Content-Length: 3
abc
",
));
let services = HttpFlow::new(
drop_payload_service(),
ExpectHandler,
None::<UpgradeHandler>,
);
let h1 = Dispatcher::new(
buf.clone(),
services,
ServiceConfig::default(),
None,
OnConnectData::default(),
);
pin!(h1);
lazy(|cx| {
assert!(h1.as_mut().poll(cx).is_pending());
// polls: manual
assert_eq!(h1.poll_count, 1);
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
stabilize_date_header(&mut res);
let res = &res[..];
let exp = http_msg(
r"
HTTP/1.1 200 OK
content-length: 15
date: Thu, 01 Jan 1970 12:34:56 UTC
payload dropped
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
if let DispatcherStateProj::Normal { inner } = h1.as_mut().project().inner.project() {
assert!(inner.state.is_none());
}
})
.await;
lazy(|cx| {
// add message that claims to have payload longer than provided
buf.extend_read_buf(http_msg(
r"
POST /drop-payload HTTP/1.1
Content-Length: 200
abc
",
));
assert!(h1.as_mut().poll(cx).is_pending());
// polls: manual => manual
assert_eq!(h1.poll_count, 2);
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
stabilize_date_header(&mut res);
let res = &res[..];
// expect response immediately even though request side has not finished reading payload
let exp = http_msg(
r"
HTTP/1.1 200 OK
content-length: 15
date: Thu, 01 Jan 1970 12:34:56 UTC
payload dropped
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
})
.await;
lazy(|cx| {
assert!(h1.as_mut().poll(cx).is_ready());
// polls: manual => manual => manual
assert_eq!(h1.poll_count, 3);
let mut res = BytesMut::from(buf.take_write_buf().as_ref());
stabilize_date_header(&mut res);
let res = &res[..];
// expect that an error response is sent back unprompted since the connection could not be cleaned up
let exp = http_msg(
r"
HTTP/1.1 500 Internal Server Error
content-length: 0
connection: close
date: Thu, 01 Jan 1970 12:34:56 UTC
",
);
assert_eq!(
res,
exp,
"\nexpected response not in write buffer:\n\
response: {:?}\n\
expected: {:?}",
String::from_utf8_lossy(res),
String::from_utf8_lossy(&exp)
);
})
.await;
}
fn http_msg(msg: impl AsRef<str>) -> BytesMut {
let mut msg = msg
.as_ref()
.trim()
.split('\n')
.into_iter()
.map(|line| [line.trim_start(), "\r"].concat())
.collect::<Vec<_>>()
.join("\n");
// remove trailing \r
msg.pop();
if !msg.is_empty() && !msg.contains("\r\n\r\n") {
msg.push_str("\r\n\r\n");
}
BytesMut::from(msg.as_bytes())
}
#[test]
fn http_msg_creates_msg() {
assert_eq!(http_msg(r""), "");
assert_eq!(
http_msg(
r"
POST / HTTP/1.1
Content-Length: 3
abc
"
),
"POST / HTTP/1.1\r\nContent-Length: 3\r\n\r\nabc"
);
assert_eq!(
http_msg(
r"
GET / HTTP/1.1
Content-Length: 3
"
),
"GET / HTTP/1.1\r\nContent-Length: 3\r\n\r\n"
);
}


@ -1,27 +1,29 @@
use std::io::Write;
use std::marker::PhantomData;
use std::ptr::copy_nonoverlapping;
use std::slice::from_raw_parts_mut;
use std::{cmp, io};
use std::{
cmp,
io::{self, Write as _},
marker::PhantomData,
ptr::copy_nonoverlapping,
slice::from_raw_parts_mut,
};
use bytes::{BufMut, BytesMut};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::header::map;
use crate::helpers;
use crate::http::header::{CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
use crate::http::{HeaderMap, StatusCode, Version};
use crate::message::{ConnectionType, RequestHeadType};
use crate::response::Response;
use crate::{
body::BodySize,
header::{
map::Value, HeaderMap, HeaderName, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
},
helpers, ConnectionType, RequestHeadType, Response, ServiceConfig, StatusCode, Version,
};
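// rough per-header byte estimate used to pre-reserve space in the output buffer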
const AVERAGE_HEADER_SIZE: usize = 30;
#[derive(Debug)]
pub(crate) struct MessageEncoder<T: MessageType> {
#[allow(dead_code)]
pub length: BodySize,
pub te: TransferEncoding,
_t: PhantomData<T>,
_phantom: PhantomData<T>,
}
impl<T: MessageType> Default for MessageEncoder<T> {
@ -29,7 +31,7 @@ impl<T: MessageType> Default for MessageEncoder<T> {
MessageEncoder {
length: BodySize::None,
te: TransferEncoding::empty(),
_t: PhantomData,
_phantom: PhantomData,
}
}
}
@ -54,7 +56,7 @@ pub(crate) trait MessageType: Sized {
dst: &mut BytesMut,
version: Version,
mut length: BodySize,
ctype: ConnectionType,
conn_type: ConnectionType,
config: &ServiceConfig,
) -> io::Result<()> {
let chunked = self.chunked();
@ -69,17 +71,28 @@ pub(crate) trait MessageType: Sized {
| StatusCode::PROCESSING
| StatusCode::NO_CONTENT => {
// skip content-length and transfer-encoding headers
// See https://tools.ietf.org/html/rfc7230#section-3.3.1
// and https://tools.ietf.org/html/rfc7230#section-3.3.2
// see https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.1
// and https://datatracker.ietf.org/doc/html/rfc7230#section-3.3.2
skip_len = true;
length = BodySize::None
}
StatusCode::NOT_MODIFIED => {
// 304 responses should never have a body but should retain a manually set
// content-length header
// see https://datatracker.ietf.org/doc/html/rfc7232#section-4.1
skip_len = false;
length = BodySize::None;
}
_ => {}
}
}
match length {
BodySize::Stream => {
if chunked {
skip_len = true;
if camel_case {
dst.put_slice(b"\r\nTransfer-Encoding: chunked\r\n")
} else {
@ -90,19 +103,14 @@ pub(crate) trait MessageType: Sized {
dst.put_slice(b"\r\n");
}
}
BodySize::Empty => {
if camel_case {
dst.put_slice(b"\r\nContent-Length: 0\r\n");
} else {
dst.put_slice(b"\r\ncontent-length: 0\r\n");
}
}
BodySize::Sized(len) => helpers::write_content_length(len, dst),
BodySize::Sized(0) if camel_case => dst.put_slice(b"\r\nContent-Length: 0\r\n"),
BodySize::Sized(0) => dst.put_slice(b"\r\ncontent-length: 0\r\n"),
BodySize::Sized(len) => helpers::write_content_length(len, dst, camel_case),
BodySize::None => dst.put_slice(b"\r\n"),
}
// Connection
match ctype {
match conn_type {
ConnectionType::Upgrade => dst.put_slice(b"connection: upgrade\r\n"),
ConnectionType::KeepAlive if version < Version::HTTP_11 => {
if camel_case {
@ -118,24 +126,14 @@ pub(crate) trait MessageType: Sized {
dst.put_slice(b"connection: close\r\n")
}
}
_ => (),
_ => {}
}
// merging headers from head and extra headers. HeaderMap::new() does not allocate.
let empty_headers = HeaderMap::new();
let extra_headers = self.extra_headers().unwrap_or(&empty_headers);
let headers = self
.headers()
.inner
.iter()
.filter(|(name, _)| !extra_headers.contains_key(*name))
.chain(extra_headers.inner.iter());
// write headers
let mut has_date = false;
let mut buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
let mut buf = dst.chunk_mut().as_mut_ptr();
let mut remaining = dst.capacity() - dst.len();
// tracks bytes written since last buffer resize
@ -143,117 +141,66 @@ pub(crate) trait MessageType: Sized {
// container's knowledge; this is used to sync the container's cursor after data is written
let mut pos = 0;
for (key, value) in headers {
self.write_headers(|key, value| {
match *key {
CONNECTION => continue,
TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => continue,
CONNECTION => return,
TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => return,
DATE => has_date = true,
_ => (),
_ => {}
}
let k = key.as_str().as_bytes();
let k_len = k.len();
match value {
map::Value::One(ref val) => {
let v = val.as_ref();
let v_len = v.len();
for val in value.iter() {
let v = val.as_ref();
let v_len = v.len();
// key length + value length + colon + space + \r\n
let len = k_len + v_len + 4;
// key length + value length + colon + space + \r\n
let len = k_len + v_len + 4;
if len > remaining {
// not enough room in buffer for this header; reserve more space
// SAFETY: all the bytes written up to position "pos" are initialized
// the written byte count and pointer advancement are kept in sync
unsafe {
dst.advance_mut(pos);
}
pos = 0;
dst.reserve(len * 2);
remaining = dst.capacity() - dst.len();
// re-assign buf raw pointer since it's possible that the buffer was
// reallocated and/or resized
buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
}
// SAFETY: on each write, it is enough to ensure that the advancement of the
// cursor matches the number of bytes written
if len > remaining {
// SAFETY: all the bytes written up to position "pos" are initialized
// the written byte count and pointer advancement are kept in sync
unsafe {
// use upper Camel-Case
if camel_case {
write_camel_case(k, from_raw_parts_mut(buf, k_len))
} else {
write_data(k, buf, k_len)
}
buf = buf.add(k_len);
write_data(b": ", buf, 2);
buf = buf.add(2);
write_data(v, buf, v_len);
buf = buf.add(v_len);
write_data(b"\r\n", buf, 2);
buf = buf.add(2);
dst.advance_mut(pos);
}
pos += len;
remaining -= len;
pos = 0;
dst.reserve(len * 2);
remaining = dst.capacity() - dst.len();
// re-assign buf raw pointer since it's possible that the buffer was
// reallocated and/or resized
buf = dst.chunk_mut().as_mut_ptr();
}
map::Value::Multi(ref vec) => {
for val in vec {
let v = val.as_ref();
let v_len = v.len();
let len = k_len + v_len + 4;
if len > remaining {
// SAFETY: all the bytes written up to position "pos" are initialized
// the written byte count and pointer advancement are kept in sync
unsafe {
dst.advance_mut(pos);
}
pos = 0;
dst.reserve(len * 2);
remaining = dst.capacity() - dst.len();
// re-assign buf raw pointer since it's possible that the buffer was
// reallocated and/or resized
buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
}
// SAFETY: on each write, it is enough to ensure that the advancement of
// the cursor matches the number of bytes written
unsafe {
if camel_case {
write_camel_case(k, from_raw_parts_mut(buf, k_len));
} else {
write_data(k, buf, k_len);
}
buf = buf.add(k_len);
write_data(b": ", buf, 2);
buf = buf.add(2);
write_data(v, buf, v_len);
buf = buf.add(v_len);
write_data(b"\r\n", buf, 2);
buf = buf.add(2);
};
pos += len;
remaining -= len;
// SAFETY: on each write, it is enough to ensure that the advancement of
// the cursor matches the number of bytes written
unsafe {
if camel_case {
// use Camel-Case headers
write_camel_case(k, buf, k_len);
} else {
write_data(k, buf, k_len);
}
}
buf = buf.add(k_len);
write_data(b": ", buf, 2);
buf = buf.add(2);
write_data(v, buf, v_len);
buf = buf.add(v_len);
write_data(b"\r\n", buf, 2);
buf = buf.add(2);
};
pos += len;
remaining -= len;
}
}
});
// final cursor synchronization with the bytes container
//
@ -263,16 +210,34 @@ pub(crate) trait MessageType: Sized {
dst.advance_mut(pos);
}
// optimized date header, set_date writes \r\n
if !has_date {
config.set_date(dst);
} else {
// msg eof
dst.extend_from_slice(b"\r\n");
// optimized date header, write_date_header writes its own \r\n
config.write_date_header(dst, camel_case);
}
// end-of-headers marker
dst.extend_from_slice(b"\r\n");
Ok(())
}
fn write_headers<F>(&mut self, mut f: F)
where
F: FnMut(&HeaderName, &Value),
{
match self.extra_headers() {
Some(headers) => {
// merging headers from head and extra headers.
self.headers()
.inner
.iter()
.filter(|(name, _)| !headers.contains_key(*name))
.chain(headers.inner.iter())
.for_each(|(k, v)| f(k, v))
}
None => self.headers().inner.iter().for_each(|(k, v)| f(k, v)),
}
}
}
impl MessageType for Response<()> {
@ -292,6 +257,12 @@ impl MessageType for Response<()> {
None
}
fn camel_case(&self) -> bool {
self.head()
.flags
.contains(crate::message::Flags::CAMEL_CASE)
}
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> {
let head = self.head();
let reason = head.reason().as_bytes();
@ -329,7 +300,7 @@ impl MessageType for RequestHeadType {
let head = self.as_ref();
dst.reserve(256 + head.headers.len() * AVERAGE_HEADER_SIZE);
write!(
Writer(dst),
helpers::MutWriter(dst),
"{} {} {}",
head.method,
head.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"),
@ -339,11 +310,7 @@ impl MessageType for RequestHeadType {
Version::HTTP_11 => "HTTP/1.1",
Version::HTTP_2 => "HTTP/2.0",
Version::HTTP_3 => "HTTP/3.0",
_ =>
return Err(io::Error::new(
io::ErrorKind::Other,
"unsupported version"
)),
_ => return Err(io::Error::new(io::ErrorKind::Other, "unsupported version")),
}
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
@ -351,16 +318,17 @@ impl MessageType for RequestHeadType {
}
impl<T: MessageType> MessageEncoder<T> {
/// Encode message
/// Encode chunk.
pub fn encode_chunk(&mut self, msg: &[u8], buf: &mut BytesMut) -> io::Result<bool> {
self.te.encode(msg, buf)
}
/// Encode eof
/// Encode EOF.
pub fn encode_eof(&mut self, buf: &mut BytesMut) -> io::Result<()> {
self.te.encode_eof(buf)
}
/// Encode message.
pub fn encode(
&mut self,
dst: &mut BytesMut,
@ -369,13 +337,13 @@ impl<T: MessageType> MessageEncoder<T> {
stream: bool,
version: Version,
length: BodySize,
ctype: ConnectionType,
conn_type: ConnectionType,
config: &ServiceConfig,
) -> io::Result<()> {
// transfer encoding
if !head {
self.te = match length {
BodySize::Empty => TransferEncoding::empty(),
BodySize::Sized(0) => TransferEncoding::empty(),
BodySize::Sized(len) => TransferEncoding::length(len),
BodySize::Stream => {
if message.chunked() && !stream {
@ -391,7 +359,7 @@ impl<T: MessageType> MessageEncoder<T> {
}
message.encode_status(dst)?;
message.encode_headers(dst, version, length, ctype, config)
message.encode_headers(dst, version, length, conn_type, config)
}
}
@ -405,10 +373,12 @@ pub(crate) struct TransferEncoding {
enum TransferEncodingKind {
/// An Encoder for when Transfer-Encoding includes `chunked`.
Chunked(bool),
/// An Encoder for when Content-Length is set.
///
/// Enforces that the body is not longer than the Content-Length header.
Length(u64),
/// An Encoder for when Content-Length is not known.
///
/// Application decides when to stop writing.
@ -462,7 +432,7 @@ impl TransferEncoding {
*eof = true;
buf.extend_from_slice(b"0\r\n\r\n");
} else {
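// chunk-size line: the length in upper-case hex followed by CR; `writeln!` appends the LF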
writeln!(Writer(buf), "{:X}\r", msg.len())
writeln!(helpers::MutWriter(buf), "{:X}\r", msg.len())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
buf.reserve(msg.len() + 2);
@ -512,50 +482,44 @@ impl TransferEncoding {
}
}
struct Writer<'a>(pub &'a mut BytesMut);
impl<'a> io::Write for Writer<'a> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
/// # Safety
/// Callers must ensure that the given length matches given value length.
/// Callers must ensure that the given `len` matches the given `value` length and that `buf` is
/// valid for writes of at least `len` bytes.
unsafe fn write_data(value: &[u8], buf: *mut u8, len: usize) {
debug_assert_eq!(value.len(), len);
copy_nonoverlapping(value.as_ptr(), buf, len);
}
fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
let mut index = 0;
let key = value;
let mut key_iter = key.iter();
/// # Safety
/// Callers must ensure that the given `len` matches the given `value` length and that `buf` is
/// valid for writes of at least `len` bytes.
unsafe fn write_camel_case(value: &[u8], buf: *mut u8, len: usize) {
// first copy entire (potentially wrong) slice to output
write_data(value, buf, len);
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
} else {
return;
// SAFETY: We just initialized the buffer with `value`
let buffer = from_raw_parts_mut(buf, len);
let mut iter = value.iter();
// first character should be uppercase
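// (`c & 0b1101_1111` clears the 0x20 bit, mapping ASCII `a..=z` to `A..=Z`)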
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[0] = c & 0b1101_1111;
}
while let Some(c) = key_iter.next() {
buffer[index] = *c;
index += 1;
if *c == b'-' {
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
// track 1 ahead of the current position since that's the location being assigned to
let mut index = 2;
// remaining characters after hyphens should also be uppercase
while let Some(&c) = iter.next() {
if c == b'-' {
// advance iter by one and uppercase if needed
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[index] = c & 0b1101_1111;
}
}
index += 1;
}
}
@ -567,8 +531,10 @@ mod tests {
use http::header::AUTHORIZATION;
use super::*;
use crate::http::header::{HeaderValue, CONTENT_TYPE};
use crate::RequestHead;
use crate::{
header::{HeaderValue, CONTENT_TYPE},
RequestHead,
};
#[test]
fn test_chunked_te() {
@ -584,8 +550,8 @@ mod tests {
);
}
#[test]
fn test_camel_case() {
#[actix_rt::test]
async fn test_camel_case() {
let mut bytes = BytesMut::with_capacity(2048);
let mut head = RequestHead::default();
head.set_camel_case_headers(true);
@ -598,12 +564,12 @@ mod tests {
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Empty,
BodySize::Sized(0),
ConnectionType::Close,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("Content-Length: 0\r\n"));
assert!(data.contains("Connection: close\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n"));
@ -616,8 +582,7 @@ mod tests {
ConnectionType::KeepAlive,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("Transfer-Encoding: chunked\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n"));
assert!(data.contains("Date: date\r\n"));
@ -638,16 +603,15 @@ mod tests {
ConnectionType::KeepAlive,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("transfer-encoding: chunked\r\n"));
assert!(data.contains("content-type: xml\r\n"));
assert!(data.contains("content-type: plain/text\r\n"));
assert!(data.contains("date: date\r\n"));
}
#[test]
fn test_extra_headers() {
#[actix_rt::test]
async fn test_extra_headers() {
let mut bytes = BytesMut::with_capacity(2048);
let mut head = RequestHead::default();
@ -668,28 +632,25 @@ mod tests {
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Empty,
BodySize::Sized(0),
ConnectionType::Close,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("content-length: 0\r\n"));
assert!(data.contains("connection: close\r\n"));
assert!(data.contains("authorization: another authorization\r\n"));
assert!(data.contains("date: date\r\n"));
}
#[test]
fn test_no_content_length() {
#[actix_rt::test]
async fn test_no_content_length() {
let mut bytes = BytesMut::with_capacity(2048);
let mut res: Response<()> =
Response::new(StatusCode::SWITCHING_PROTOCOLS).into_body::<()>();
let mut res = Response::with_body(StatusCode::SWITCHING_PROTOCOLS, ());
res.headers_mut().insert(DATE, HeaderValue::from_static(""));
res.headers_mut()
.insert(DATE, HeaderValue::from_static(&""));
res.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static(&"0"));
.insert(CONTENT_LENGTH, HeaderValue::from_static("0"));
let _ = res.encode_headers(
&mut bytes,
@ -698,8 +659,7 @@ mod tests {
ConnectionType::Upgrade,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
let data = String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(!data.contains("content-length: 0\r\n"));
assert!(!data.contains("transfer-encoding: chunked\r\n"));
}


@ -1,18 +1,14 @@
use std::task::{Context, Poll};
use actix_service::{Service, ServiceFactory};
use futures_util::future::{ready, Ready};
use actix_utils::future::{ready, Ready};
use crate::error::Error;
use crate::request::Request;
use crate::{Error, Request};
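/// Pass-through `Expect: 100-continue` handler: the request is returned unchanged so the
/// dispatcher can write the interim `100 Continue` response itself (see the
/// `expect_handling` dispatcher test).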
pub struct ExpectHandler;
impl ServiceFactory for ExpectHandler {
type Config = ();
type Request = Request;
impl ServiceFactory<Request> for ExpectHandler {
type Response = Request;
type Error = Error;
type Config = ();
type Service = ExpectHandler;
type InitError = Error;
type Future = Ready<Result<Self::Service, Self::InitError>>;
@ -22,17 +18,14 @@ impl ServiceFactory for ExpectHandler {
}
}
impl Service for ExpectHandler {
type Request = Request;
impl Service<Request> for ExpectHandler {
type Response = Request;
type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
actix_service::always_ready!();
fn call(&mut self, req: Request) -> Self::Future {
fn call(&self, req: Request) -> Self::Future {
ready(Ok(req))
// TODO: add some way to trigger error
// Err(error::ErrorExpectationFailed("test"))


@ -1,14 +1,19 @@
//! HTTP/1 implementation
//! HTTP/1 protocol implementation.
use bytes::{Bytes, BytesMut};
mod chunked;
mod client;
mod codec;
mod decoder;
mod dispatcher;
#[cfg(test)]
mod dispatcher_tests;
mod encoder;
mod expect;
mod payload;
mod service;
mod timer;
mod upgrade;
mod utils;
@ -17,16 +22,17 @@ pub use self::codec::Codec;
pub use self::dispatcher::Dispatcher;
pub use self::expect::ExpectHandler;
pub use self::payload::Payload;
pub use self::service::{H1Service, H1ServiceHandler, OneRequest};
pub use self::service::{H1Service, H1ServiceHandler};
pub use self::upgrade::UpgradeHandler;
pub use self::utils::SendResponse;
#[derive(Debug)]
/// Codec message
pub enum Message<T> {
/// Http message
/// HTTP message.
Item(T),
/// Payload chunk
/// Payload chunk.
Chunk(Option<Bytes>),
}
@ -57,7 +63,7 @@ pub(crate) fn reserve_readbuf(src: &mut BytesMut) {
#[cfg(test)]
mod tests {
use super::*;
use crate::request::Request;
use crate::Request;
impl Message<Request> {
pub fn message(self) -> Request {

View File

@ -1,11 +1,13 @@
//! Payload stream
use std::cell::RefCell;
use std::collections::VecDeque;
use std::pin::Pin;
use std::rc::{Rc, Weak};
use std::task::{Context, Poll};
use actix_utils::task::LocalWaker;
use std::{
cell::RefCell,
collections::VecDeque,
pin::Pin,
rc::{Rc, Weak},
task::{Context, Poll, Waker},
};
use bytes::Bytes;
use futures_core::Stream;
@ -23,39 +25,32 @@ pub enum PayloadStatus {
/// Buffered stream of bytes chunks
///
/// Payload stores chunks in a vector. First chunk can be received with
/// `.readany()` method. Payload stream is not thread safe. Payload does not
/// notify current task when new data is available.
/// Payload stores chunks in a vector. The first chunk can be received with `poll_next`. Payload
/// does not notify the current task when new data is available.
///
/// Payload stream can be used as `Response` body stream.
/// Payload can be used as `Response` body stream.
#[derive(Debug)]
pub struct Payload {
inner: Rc<RefCell<Inner>>,
}
impl Payload {
/// Create payload stream.
/// Creates a payload stream.
///
/// This method construct two objects responsible for bytes stream
/// generation.
///
/// * `PayloadSender` - *Sender* side of the stream
///
/// * `Payload` - *Receiver* side of the stream
/// This method constructs two objects responsible for byte stream generation:
/// - `PayloadSender` - *Sender* side of the stream
/// - `Payload` - *Receiver* side of the stream
pub fn create(eof: bool) -> (PayloadSender, Payload) {
let shared = Rc::new(RefCell::new(Inner::new(eof)));
(
PayloadSender {
inner: Rc::downgrade(&shared),
},
PayloadSender::new(Rc::downgrade(&shared)),
Payload { inner: shared },
)
}
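// A minimal usage sketch (illustrative only, not part of the original source): data pushed
// back with `unread_data` is read out through the `Stream` interface, mirroring the
// `test_unread_data` test at the bottom of this module.
#[allow(dead_code)]
async fn payload_usage_sketch() {
    use actix_utils::future::poll_fn;
    let (_sender, mut payload) = Payload::create(false);
    payload.unread_data(Bytes::from("data"));
    let chunk = poll_fn(|cx| Pin::new(&mut payload).poll_next(cx))
        .await
        .unwrap()
        .unwrap();
    assert_eq!(chunk, Bytes::from("data"));
}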
/// Create empty payload
#[doc(hidden)]
pub fn empty() -> Payload {
/// Creates an empty payload.
pub(crate) fn empty() -> Payload {
Payload {
inner: Rc::new(RefCell::new(Inner::new(true))),
}
@ -78,14 +73,6 @@ impl Payload {
pub fn unread_data(&mut self, data: Bytes) {
self.inner.borrow_mut().unread_data(data);
}
#[inline]
pub fn readany(
&mut self,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
self.inner.borrow_mut().readany(cx)
}
}
impl Stream for Payload {
@ -95,7 +82,7 @@ impl Stream for Payload {
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
self.inner.borrow_mut().readany(cx)
Pin::new(&mut *self.inner.borrow_mut()).poll_next(cx)
}
}
@ -105,6 +92,10 @@ pub struct PayloadSender {
}
impl PayloadSender {
fn new(inner: Weak<RefCell<Inner>>) -> Self {
Self { inner }
}
#[inline]
pub fn set_error(&mut self, err: PayloadError) {
if let Some(shared) = self.inner.upgrade() {
@ -134,7 +125,7 @@ impl PayloadSender {
if shared.borrow().need_read {
PayloadStatus::Read
} else {
shared.borrow_mut().io_task.register(cx.waker());
shared.borrow_mut().register_io(cx);
PayloadStatus::Pause
}
} else {
@ -150,8 +141,8 @@ struct Inner {
err: Option<PayloadError>,
need_read: bool,
items: VecDeque<Bytes>,
task: LocalWaker,
io_task: LocalWaker,
task: Option<Waker>,
io_task: Option<Waker>,
}
impl Inner {
@ -162,8 +153,46 @@ impl Inner {
err: None,
items: VecDeque::new(),
need_read: true,
task: LocalWaker::new(),
io_task: LocalWaker::new(),
task: None,
io_task: None,
}
}
/// Wakes the future waiting for payload data to become available.
fn wake(&mut self) {
if let Some(waker) = self.task.take() {
waker.wake();
}
}
/// Wakes the future feeding data to the `Payload`.
fn wake_io(&mut self) {
if let Some(waker) = self.io_task.take() {
waker.wake();
}
}
/// Registers a future waiting for data from the payload.
/// The stored waker is later used by `Inner::wake`.
fn register(&mut self, cx: &mut Context<'_>) {
if self
.task
.as_ref()
.map_or(true, |w| !cx.waker().will_wake(w))
{
self.task = Some(cx.waker().clone());
}
}
/// Registers a future feeding data to the payload.
/// The stored waker is later used by `Inner::wake_io`.
fn register_io(&mut self, cx: &mut Context<'_>) {
if self
.io_task
.as_ref()
.map_or(true, |w| !cx.waker().will_wake(w))
{
self.io_task = Some(cx.waker().clone());
}
}
@ -182,7 +211,7 @@ impl Inner {
self.len += data.len();
self.items.push_back(data);
self.need_read = self.len < MAX_BUFFER_SIZE;
self.task.wake();
self.wake();
}
#[cfg(test)]
@ -190,8 +219,8 @@ impl Inner {
self.len
}
fn readany(
&mut self,
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
if let Some(data) = self.items.pop_front() {
@ -199,9 +228,9 @@ impl Inner {
self.need_read = self.len < MAX_BUFFER_SIZE;
if self.need_read && !self.eof {
self.task.register(cx.waker());
self.register(cx);
}
self.io_task.wake();
self.wake_io();
Poll::Ready(Some(Ok(data)))
} else if let Some(err) = self.err.take() {
Poll::Ready(Some(Err(err)))
@ -209,8 +238,8 @@ impl Inner {
Poll::Ready(None)
} else {
self.need_read = true;
self.task.register(cx.waker());
self.io_task.wake();
self.register(cx);
self.wake_io();
Poll::Pending
}
}
@ -223,8 +252,18 @@ impl Inner {
#[cfg(test)]
mod tests {
use std::panic::{RefUnwindSafe, UnwindSafe};
use actix_utils::future::poll_fn;
use static_assertions::{assert_impl_all, assert_not_impl_any};
use super::*;
use futures_util::future::poll_fn;
assert_impl_all!(Payload: Unpin);
assert_not_impl_any!(Payload: Send, Sync, UnwindSafe, RefUnwindSafe);
assert_impl_all!(Inner: Unpin, Send, Sync);
assert_not_impl_any!(Inner: UnwindSafe, RefUnwindSafe);
#[actix_rt::test]
async fn test_unread_data() {
@ -236,7 +275,10 @@ mod tests {
assert_eq!(
Bytes::from("data"),
poll_fn(|cx| payload.readany(cx)).await.unwrap().unwrap()
poll_fn(|cx| Pin::new(&mut payload).poll_next(cx))
.await
.unwrap()
.unwrap()
);
}
}


@ -1,50 +1,49 @@
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use std::{fmt, net};
use std::{
fmt,
marker::PhantomData,
net,
rc::Rc,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_rt::net::TcpStream;
use actix_service::{pipeline_factory, IntoServiceFactory, Service, ServiceFactory};
use futures_core::ready;
use futures_util::future::{ok, Ready};
use actix_service::{
fn_service, IntoServiceFactory, Service, ServiceFactory, ServiceFactoryExt as _,
};
use actix_utils::future::ready;
use futures_core::future::LocalBoxFuture;
use crate::body::MessageBody;
use crate::cloneable::CloneableService;
use crate::config::ServiceConfig;
use crate::error::{DispatchError, Error, ParseError};
use crate::helpers::DataFactory;
use crate::request::Request;
use crate::response::Response;
use crate::{ConnectCallback, Extensions};
use crate::{
body::{BoxBody, MessageBody},
config::ServiceConfig,
error::DispatchError,
service::HttpServiceHandler,
ConnectCallback, OnConnectData, Request, Response,
};
use super::codec::Codec;
use super::dispatcher::Dispatcher;
use super::{ExpectHandler, Message, UpgradeHandler};
use super::{codec::Codec, dispatcher::Dispatcher, ExpectHandler, UpgradeHandler};
/// `ServiceFactory` implementation for HTTP/1 transport
pub struct H1Service<T, S, B, X = ExpectHandler, U = UpgradeHandler<T>> {
pub struct H1Service<T, S, B, X = ExpectHandler, U = UpgradeHandler> {
srv: S,
cfg: ServiceConfig,
expect: X,
upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
_t: PhantomData<(T, B)>,
_phantom: PhantomData<B>,
}
impl<T, S, B> H1Service<T, S, B>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
{
/// Create new `HttpService` instance with config.
pub(crate) fn with_config<F: IntoServiceFactory<S>>(
pub(crate) fn with_config<F: IntoServiceFactory<S, Request>>(
cfg: ServiceConfig,
service: F,
) -> Self {
@ -53,44 +52,45 @@ where
srv: service.into_factory(),
expect: ExpectHandler,
upgrade: None,
on_connect: None,
on_connect_ext: None,
_t: PhantomData,
_phantom: PhantomData,
}
}
}
impl<S, B, X, U> H1Service<TcpStream, S, B, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<
Config = (),
Request = (Request, Framed<TcpStream, Codec>),
Response = (),
>,
U::Error: fmt::Display + Into<Error>,
U: ServiceFactory<(Request, Framed<TcpStream, Codec>), Config = (), Response = ()>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Create simple tcp stream service
pub fn tcp(
self,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Request = TcpStream,
Response = (),
Error = DispatchError,
InitError = (),
> {
pipeline_factory(|io: TcpStream| {
fn_service(|io: TcpStream| {
let peer_addr = io.peer_addr().ok();
ok((io, peer_addr))
ready(Ok((io, peer_addr)))
})
.and_then(self)
}
@ -100,114 +100,137 @@ where
mod openssl {
use super::*;
use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream};
use actix_tls::{openssl::HandshakeError, TlsError};
use actix_tls::accept::{
openssl::{
reexports::{Error as SslError, SslAcceptor},
Acceptor, TlsStream,
},
TlsError,
};
impl<S, B, X, U> H1Service<SslStream<TcpStream>, S, B, X, U>
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<
(Request, Framed<TlsStream<TcpStream>, Codec>),
Config = (),
Request = (Request, Framed<SslStream<TcpStream>, Codec>),
Response = (),
>,
U::Error: fmt::Display + Into<Error>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Create openssl based service
/// Create OpenSSL based service.
pub fn openssl(
self,
acceptor: SslAcceptor,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Request = TcpStream,
Response = (),
Error = TlsError<HandshakeError<TcpStream>, DispatchError>,
Error = TlsError<SslError, DispatchError>,
InitError = (),
> {
pipeline_factory(
Acceptor::new(acceptor)
.map_err(TlsError::Tls)
.map_init_err(|_| panic!()),
)
.and_then(|io: SslStream<TcpStream>| {
let peer_addr = io.get_ref().peer_addr().ok();
ok((io, peer_addr))
})
.and_then(self.map_err(TlsError::Service))
Acceptor::new(acceptor)
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
.map_err(TlsError::into_service_error)
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().peer_addr().ok();
(io, peer_addr)
})
.and_then(self.map_err(TlsError::Service))
}
}
}
#[cfg(feature = "rustls")]
mod rustls {
use std::io;
use actix_service::ServiceFactoryExt as _;
use actix_tls::accept::{
rustls::{reexports::ServerConfig, Acceptor, TlsStream},
TlsError,
};
use super::*;
use actix_tls::rustls::{Acceptor, ServerConfig, TlsStream};
use actix_tls::TlsError;
use std::{fmt, io};
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<
(Request, Framed<TlsStream<TcpStream>, Codec>),
Config = (),
Request = (Request, Framed<TlsStream<TcpStream>, Codec>),
Response = (),
>,
U::Error: fmt::Display + Into<Error>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
/// Create rustls based service
/// Create Rustls based service.
pub fn rustls(
self,
config: ServerConfig,
) -> impl ServiceFactory<
TcpStream,
Config = (),
Request = TcpStream,
Response = (),
Error = TlsError<io::Error, DispatchError>,
InitError = (),
> {
pipeline_factory(
Acceptor::new(config)
.map_err(TlsError::Tls)
.map_init_err(|_| panic!()),
)
.and_then(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
ok((io, peer_addr))
})
.and_then(self.map_err(TlsError::Service))
Acceptor::new(config)
.map_init_err(|_| {
unreachable!("TLS acceptor service factory does not error on init")
})
.map_err(TlsError::into_service_error)
.map(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
(io, peer_addr)
})
.and_then(self.map_err(TlsError::Service))
}
}
}
impl<T, S, B, X, U> H1Service<T, S, B, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Response<BoxBody>>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
{
pub fn expect<X1>(self, expect: X1) -> H1Service<T, S, B, X1, U>
where
X1: ServiceFactory<Request = Request, Response = Request>,
X1::Error: Into<Error>,
X1: ServiceFactory<Request, Response = Request>,
X1::Error: Into<Response<BoxBody>>,
X1::InitError: fmt::Debug,
{
H1Service {
@ -215,15 +238,14 @@ where
cfg: self.cfg,
srv: self.srv,
upgrade: self.upgrade,
on_connect: self.on_connect,
on_connect_ext: self.on_connect_ext,
_t: PhantomData,
_phantom: PhantomData,
}
}
pub fn upgrade<U1>(self, upgrade: Option<U1>) -> H1Service<T, S, B, X, U1>
where
U1: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
U1: ServiceFactory<(Request, Framed<T, Codec>), Response = ()>,
U1::Error: fmt::Display,
U1::InitError: fmt::Debug,
{
@ -232,21 +254,11 @@ where
cfg: self.cfg,
srv: self.srv,
expect: self.expect,
on_connect: self.on_connect,
on_connect_ext: self.on_connect_ext,
_t: PhantomData,
_phantom: PhantomData,
}
}
/// Set on connect callback.
pub(crate) fn on_connect(
mut self,
f: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
) -> Self {
self.on_connect = f;
self
}
/// Set on connect callback.
pub(crate) fn on_connect_ext(mut self, f: Option<Rc<ConnectCallback<T>>>) -> Self {
self.on_connect_ext = f;
@ -254,349 +266,104 @@ where
}
}
impl<T, S, B, X, U> ServiceFactory for H1Service<T, S, B, X, U>
impl<T, S, B, X, U> ServiceFactory<(T, Option<net::SocketAddr>)> for H1Service<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
T: AsyncRead + AsyncWrite + Unpin + 'static,
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<Config = (), Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display + Into<Error>,
U: ServiceFactory<(Request, Framed<T, Codec>), Config = (), Response = ()>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
type Config = ();
type Request = (T, Option<net::SocketAddr>);
type Response = ();
type Error = DispatchError;
type InitError = ();
type Config = ();
type Service = H1ServiceHandler<T, S::Service, B, X::Service, U::Service>;
type Future = H1ServiceResponse<T, S, B, X, U>;
fn new_service(&self, _: ()) -> Self::Future {
H1ServiceResponse {
fut: self.srv.new_service(()),
fut_ex: Some(self.expect.new_service(())),
fut_upg: self.upgrade.as_ref().map(|f| f.new_service(())),
expect: None,
upgrade: None,
on_connect: self.on_connect.clone(),
on_connect_ext: self.on_connect_ext.clone(),
cfg: Some(self.cfg.clone()),
_t: PhantomData,
}
}
}
#[doc(hidden)]
#[pin_project::pin_project]
pub struct H1ServiceResponse<T, S, B, X, U>
where
S: ServiceFactory<Request = Request>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
X: ServiceFactory<Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
#[pin]
fut: S::Future,
#[pin]
fut_ex: Option<X::Future>,
#[pin]
fut_upg: Option<U::Future>,
expect: Option<X::Service>,
upgrade: Option<U::Service>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
cfg: Option<ServiceConfig>,
_t: PhantomData<(T, B)>,
}
impl<T, S, B, X, U> Future for H1ServiceResponse<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
X: ServiceFactory<Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
type Output = Result<H1ServiceHandler<T, S::Service, B, X::Service, U::Service>, ()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
if let Some(fut) = this.fut_ex.as_pin_mut() {
let expect = ready!(fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)))?;
this = self.as_mut().project();
*this.expect = Some(expect);
this.fut_ex.set(None);
}
if let Some(fut) = this.fut_upg.as_pin_mut() {
let upgrade = ready!(fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)))?;
this = self.as_mut().project();
*this.upgrade = Some(upgrade);
this.fut_ex.set(None);
}
let result = ready!(this
.fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)));
Poll::Ready(result.map(|service| {
let this = self.as_mut().project();
H1ServiceHandler::new(
this.cfg.take().unwrap(),
service,
this.expect.take().unwrap(),
this.upgrade.take(),
this.on_connect.clone(),
this.on_connect_ext.clone(),
)
}))
}
}
/// `Service` implementation for HTTP/1 transport
pub struct H1ServiceHandler<T, S: Service, B, X: Service, U: Service> {
srv: CloneableService<S>,
expect: CloneableService<X>,
upgrade: Option<CloneableService<U>>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
cfg: ServiceConfig,
_t: PhantomData<(T, B)>,
}
impl<T, S, B, X, U> H1ServiceHandler<T, S, B, X, U>
where
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
fn new(
cfg: ServiceConfig,
srv: S,
expect: X,
upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
) -> H1ServiceHandler<T, S, B, X, U> {
H1ServiceHandler {
srv: CloneableService::new(srv),
expect: CloneableService::new(expect),
upgrade: upgrade.map(CloneableService::new),
cfg,
on_connect,
on_connect_ext,
_t: PhantomData,
}
}
}
impl<T, S, B, X, U> Service for H1ServiceHandler<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display + Into<Error>,
{
type Request = (T, Option<net::SocketAddr>);
type Response = ();
type Error = DispatchError;
type Future = Dispatcher<T, S, B, X, U>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let ready = self
.expect
.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready();
let ready = self
.srv
.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready()
&& ready;
let ready = if let Some(ref mut upg) = self.upgrade {
upg.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready()
&& ready
} else {
ready
};
if ready {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn call(&mut self, (io, addr): Self::Request) -> Self::Future {
let deprecated_on_connect = self.on_connect.as_ref().map(|handler| handler(&io));
let mut connect_extensions = Extensions::new();
if let Some(ref handler) = self.on_connect_ext {
// run on_connect_ext callback, populating connect extensions
handler(&io, &mut connect_extensions);
}
Dispatcher::new(
io,
self.cfg.clone(),
self.srv.clone(),
self.expect.clone(),
self.upgrade.clone(),
deprecated_on_connect,
connect_extensions,
addr,
)
}
}
/// `ServiceFactory` implementation for `OneRequestService` service
#[derive(Default)]
pub struct OneRequest<T> {
config: ServiceConfig,
_t: PhantomData<T>,
}
impl<T> OneRequest<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
/// Create new `H1SimpleService` instance.
pub fn new() -> Self {
OneRequest {
config: ServiceConfig::default(),
_t: PhantomData,
}
}
}
impl<T> ServiceFactory for OneRequest<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Config = ();
type Request = T;
type Response = (Request, Framed<T, Codec>);
type Error = ParseError;
type InitError = ();
type Service = OneRequestService<T>;
type Future = Ready<Result<Self::Service, Self::InitError>>;
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(OneRequestService {
_t: PhantomData,
config: self.config.clone(),
let service = self.srv.new_service(());
let expect = self.expect.new_service(());
let upgrade = self.upgrade.as_ref().map(|s| s.new_service(()));
let on_connect_ext = self.on_connect_ext.clone();
let cfg = self.cfg.clone();
Box::pin(async move {
let expect = expect
.await
.map_err(|e| log::error!("Init http expect service error: {:?}", e))?;
let upgrade = match upgrade {
Some(upgrade) => {
let upgrade = upgrade
.await
.map_err(|e| log::error!("Init http upgrade service error: {:?}", e))?;
Some(upgrade)
}
None => None,
};
let service = service
.await
.map_err(|e| log::error!("Init http service error: {:?}", e))?;
Ok(H1ServiceHandler::new(
cfg,
service,
expect,
upgrade,
on_connect_ext,
))
})
}
}
/// `Service` implementation for HTTP1 transport. Reads one request and returns
/// request and framed object.
pub struct OneRequestService<T> {
_t: PhantomData<T>,
config: ServiceConfig,
}
/// `Service` implementation for HTTP/1 transport
pub type H1ServiceHandler<T, S, B, X, U> = HttpServiceHandler<T, S, B, X, U>;
impl<T> Service for OneRequestService<T>
impl<T, S, B, X, U> Service<(T, Option<net::SocketAddr>)> for HttpServiceHandler<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Request = T;
type Response = (Request, Framed<T, Codec>);
type Error = ParseError;
type Future = OneRequestServiceResponse<T>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
S: Service<Request>,
S::Error: Into<Response<BoxBody>>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request, Response = Request>,
X::Error: Into<Response<BoxBody>>,
U: Service<(Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display + Into<Response<BoxBody>>,
{
type Response = ();
type Error = DispatchError;
type Future = Dispatcher<T, S, B, X, U>;
fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self._poll_ready(cx).map_err(|err| {
log::error!("HTTP/1 service readiness error: {:?}", err);
DispatchError::Service(err)
})
}
fn call(&mut self, req: Self::Request) -> Self::Future {
OneRequestServiceResponse {
framed: Some(Framed::new(req, Codec::new(self.config.clone()))),
}
}
}
#[doc(hidden)]
#[pin_project::pin_project]
pub struct OneRequestServiceResponse<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
#[pin]
framed: Option<Framed<T, Codec>>,
}
impl<T> Future for OneRequestServiceResponse<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Output = Result<(Request, Framed<T, Codec>), ParseError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project();
match ready!(this.framed.as_pin_mut().unwrap().next_item(cx)) {
Some(Ok(req)) => match req {
Message::Item(req) => {
let mut this = self.as_mut().project();
Poll::Ready(Ok((req, this.framed.take().unwrap())))
}
Message::Chunk(_) => unreachable!("Something is wrong"),
},
Some(Err(err)) => Poll::Ready(Err(err)),
None => Poll::Ready(Err(ParseError::Incomplete)),
}
fn call(&self, (io, addr): (T, Option<net::SocketAddr>)) -> Self::Future {
let conn_data = OnConnectData::from_io(&io, self.on_connect_ext.as_deref());
Dispatcher::new(io, self.flow.clone(), self.cfg.clone(), addr, conn_data)
}
}
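
Not part of the diff: the reworked `tcp()` constructor above swaps `pipeline_factory` for `fn_service(..)` chained with `.and_then(self)`. A rough standalone sketch of just the preparation step, assuming the actix-service 2.x and actix-utils 3.x APIs (`prepare_io` is a hypothetical name):

```rust
use std::{io, net::SocketAddr};

use actix_rt::net::TcpStream;
use actix_service::{fn_service, ServiceFactory};
use actix_utils::future::ready;

// Hypothetical helper: wrap each accepted TcpStream into the
// `(io, peer_addr)` tuple that `H1Service` takes as its request type.
fn prepare_io() -> impl ServiceFactory<
    TcpStream,
    Config = (),
    Response = (TcpStream, Option<SocketAddr>),
    Error = io::Error,
    InitError = (),
> {
    fn_service(|io: TcpStream| {
        let peer_addr = io.peer_addr().ok();
        ready(Ok::<_, io::Error>((io, peer_addr)))
    })
}
```

In the real constructor this step is chained with the HTTP service via `.and_then(self)`, with `DispatchError` as the unified error type, as shown in the `tcp()` signature above.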

View File

@ -0,0 +1,80 @@
use std::{fmt, future::Future, pin::Pin, task::Context};
use actix_rt::time::{Instant, Sleep};
#[derive(Debug)]
pub(super) enum TimerState {
Disabled,
Inactive,
Active { timer: Pin<Box<Sleep>> },
}
impl TimerState {
pub(super) fn new(enabled: bool) -> Self {
if enabled {
Self::Inactive
} else {
Self::Disabled
}
}
pub(super) fn is_enabled(&self) -> bool {
matches!(self, Self::Active { .. } | Self::Inactive)
}
pub(super) fn set(&mut self, timer: Sleep, line: u32) {
if matches!(self, Self::Disabled) {
log::trace!("setting disabled timer from line {}", line);
}
*self = Self::Active {
timer: Box::pin(timer),
};
}
pub(super) fn set_and_init(&mut self, cx: &mut Context<'_>, timer: Sleep, line: u32) {
self.set(timer, line);
self.init(cx);
}
pub(super) fn clear(&mut self, line: u32) {
if matches!(self, Self::Disabled) {
log::trace!("trying to clear a disabled timer from line {}", line);
}
if matches!(self, Self::Inactive) {
log::trace!("trying to clear an inactive timer from line {}", line);
}
*self = Self::Inactive;
}
pub(super) fn init(&mut self, cx: &mut Context<'_>) {
if let TimerState::Active { timer } = self {
let _ = timer.as_mut().poll(cx);
}
}
}
impl fmt::Display for TimerState {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
TimerState::Disabled => f.write_str("timer is disabled"),
TimerState::Inactive => f.write_str("timer is inactive"),
TimerState::Active { timer } => {
let deadline = timer.deadline();
let now = Instant::now();
if deadline < now {
f.write_str("timer is active and has reached deadline")
} else {
write!(
f,
"timer is active and due to expire in {} milliseconds",
((deadline - now).as_secs_f32() * 1000.0)
)
}
}
}
}
}
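
Not part of the diff: `TimerState::init` polls a freshly set timer once so the current task is registered with the runtime before the deadline can fire. A minimal sketch of that pin-and-poll pattern (`IdleTimer` is a hypothetical helper, assuming `actix_rt::time::sleep`):

```rust
use std::{future::Future, pin::Pin, task::Context, time::Duration};

use actix_rt::time::{sleep, Sleep};

/// Hypothetical helper showing how a boxed, pinned `Sleep` is armed and polled.
struct IdleTimer {
    timer: Option<Pin<Box<Sleep>>>,
}

impl IdleTimer {
    /// Arm (or re-arm) the timer for `dur` from now.
    fn arm(&mut self, dur: Duration) {
        self.timer = Some(Box::pin(sleep(dur)));
    }

    /// Poll the timer; returns true once it has fired. Polling also registers
    /// the current task to be woken when the deadline passes, which is why
    /// `TimerState::init` polls a freshly set timer once.
    fn poll_expired(&mut self, cx: &mut Context<'_>) -> bool {
        match self.timer.as_mut() {
            Some(timer) => timer.as_mut().poll(cx).is_ready(),
            None => false,
        }
    }
}
```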

View File

@ -1,41 +1,32 @@
use std::marker::PhantomData;
use std::task::{Context, Poll};
use actix_codec::Framed;
use actix_service::{Service, ServiceFactory};
use futures_util::future::{ready, Ready};
use futures_core::future::LocalBoxFuture;
use crate::error::Error;
use crate::h1::Codec;
use crate::request::Request;
use crate::{h1::Codec, Error, Request};
pub struct UpgradeHandler<T>(pub(crate) PhantomData<T>);
pub struct UpgradeHandler;
impl<T> ServiceFactory for UpgradeHandler<T> {
type Config = ();
type Request = (Request, Framed<T, Codec>);
impl<T> ServiceFactory<(Request, Framed<T, Codec>)> for UpgradeHandler {
type Response = ();
type Error = Error;
type Service = UpgradeHandler<T>;
type Config = ();
type Service = UpgradeHandler;
type InitError = Error;
type Future = Ready<Result<Self::Service, Self::InitError>>;
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
unimplemented!()
}
}
impl<T> Service for UpgradeHandler<T> {
type Request = (Request, Framed<T, Codec>);
impl<T> Service<(Request, Framed<T, Codec>)> for UpgradeHandler {
type Response = ();
type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>;
type Future = LocalBoxFuture<'static, Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
actix_service::always_ready!();
fn call(&mut self, _: Self::Request) -> Self::Future {
ready(Ok(()))
fn call(&self, _: (Request, Framed<T, Codec>)) -> Self::Future {
unimplemented!()
}
}
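
Not part of the diff: `UpgradeHandler` is only a default placeholder and is never constructed, hence the `unimplemented!()` bodies. A user-supplied upgrade service matching the `(Request, Framed<T, Codec>)` request type used above could be sketched roughly like this, assuming the public actix-http 3.x and actix-service 2.x APIs (`noop_upgrade` is a hypothetical name):

```rust
use std::convert::Infallible;

use actix_codec::Framed;
use actix_http::{h1::Codec, Request};
use actix_service::{fn_service, ServiceFactory};

// Hypothetical upgrade service: take ownership of the request and the framed
// I/O after the HTTP/1 upgrade, then simply drop the connection.
fn noop_upgrade<T>() -> impl ServiceFactory<
    (Request, Framed<T, Codec>),
    Config = (),
    Response = (),
    Error = Infallible,
    InitError = (),
> {
    fn_service(|(_req, _framed): (Request, Framed<T, Codec>)| async {
        Ok::<_, Infallible>(())
    })
}
```

A real upgrade service would pick an error type satisfying the `U::Error: fmt::Display + Into<Response<BoxBody>>` bound from the `H1Service` factory shown earlier.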

View File

@ -1,27 +1,35 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{
future::Future,
pin::Pin,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use pin_project_lite::pin_project;
use crate::body::{BodySize, MessageBody, ResponseBody};
use crate::error::Error;
use crate::h1::{Codec, Message};
use crate::response::Response;
use crate::{
body::{BodySize, MessageBody},
h1::{Codec, Message},
Error, Response,
};
/// Send HTTP/1 response
#[pin_project::pin_project]
pub struct SendResponse<T, B> {
res: Option<Message<(Response<()>, BodySize)>>,
#[pin]
body: Option<ResponseBody<B>>,
#[pin]
framed: Option<Framed<T, Codec>>,
pin_project! {
/// Send HTTP/1 response
pub struct SendResponse<T, B> {
res: Option<Message<(Response<()>, BodySize)>>,
#[pin]
body: Option<B>,
#[pin]
framed: Option<Framed<T, Codec>>,
}
}
impl<T, B> SendResponse<T, B>
where
B: MessageBody,
B::Error: Into<Error>,
{
pub fn new(framed: Framed<T, Codec>, response: Response<B>) -> Self {
let (res, body) = response.into_parts();
@ -37,7 +45,8 @@ where
impl<T, B> Future for SendResponse<T, B>
where
T: AsyncRead + AsyncWrite + Unpin,
B: MessageBody + Unpin,
B: MessageBody,
B::Error: Into<Error>,
{
type Output = Result<Framed<T, Codec>, Error>;
@ -60,15 +69,24 @@ where
.unwrap()
.is_write_buf_full()
{
match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx)? {
let next = match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx) {
Poll::Ready(Some(Ok(item))) => Poll::Ready(Some(item)),
Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err.into())),
Poll::Ready(None) => Poll::Ready(None),
Poll::Pending => Poll::Pending,
};
match next {
Poll::Ready(item) => {
// body is done when item is None
body_done = item.is_none();
if body_done {
let _ = this.body.take();
this.body.set(None);
}
let framed = this.framed.as_mut().as_pin_mut().unwrap();
framed.write(Message::Chunk(item))?;
framed
.write(Message::Chunk(item))
.map_err(|err| Error::new_send_response().with_cause(err))?;
}
Poll::Pending => body_ready = false,
}
@ -79,7 +97,10 @@ where
// flush write buffer
if !framed.is_write_buf_empty() {
match framed.flush(cx)? {
match framed
.flush(cx)
.map_err(|err| Error::new_send_response().with_cause(err))?
{
Poll::Ready(_) => {
if body_ready {
continue;
@ -93,7 +114,9 @@ where
// send response
if let Some(res) = this.res.take() {
framed.write(res)?;
framed
.write(res)
.map_err(|err| Error::new_send_response().with_cause(err))?;
continue;
}
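
Not part of the diff: `SendResponse` drives a whole response (head, body chunks, flushes) to completion and then hands the `Framed` transport back to the caller. A rough usage sketch, assuming the public actix-http 3.x API (`send_hello` is a hypothetical helper):

```rust
use actix_codec::Framed;
use actix_http::{h1, Error, Response};
use actix_rt::net::TcpStream;

// Hypothetical one-shot helper: write a single HTTP/1 response over an
// already-established framed connection and get the framed I/O back.
async fn send_hello(
    framed: Framed<TcpStream, h1::Codec>,
) -> Result<Framed<TcpStream, h1::Codec>, Error> {
    let res = Response::ok().set_body("hello world");
    h1::SendResponse::new(framed, res).await
}
```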
