mirror of https://github.com/fafhrd91/actix-web synced 2025-07-06 10:50:17 +02:00

Compare commits


205 Commits

SHA1 Message Date
991363a104 consistent case s/web/Web 2021-02-10 12:12:03 +00:00
a290e58982 prepare beta 2 release set (#1975) 2021-02-10 12:10:03 +00:00
dcad9724bc ensure poll_flush on h1 connection disconnect (#1974) 2021-02-10 10:11:53 +00:00
949d14ae2b clean up header map (#1964) 2021-02-09 22:59:17 +00:00
a6ed4aee84 add poll_flush after a non blocked write to h1 dispatcher (#1971) 2021-02-09 22:32:46 +00:00
519d7f2b8a add trust-dns optional feature for actix-http and awc (#1969) 2021-02-09 10:41:20 +00:00
dddb623a11 add services register for tuple and vec of services (#1933) 2021-02-07 23:47:51 +00:00
266cf0622c reduce branch.remove deadcode for h1 dispatcher (#1962) 2021-02-07 22:48:27 +00:00
9604e249c9 use stable clippy (#1963) 2021-02-07 20:33:53 +00:00
dbc47c9122 optimize actix-http messages (#1914) 2021-02-07 20:19:10 +00:00
4c243cbf89 simplify methods of awc::connect::Connect trait (#1941) 2021-02-07 18:56:39 +00:00
deafb7c8b8 Improve impl ResponseError documentation (#1939)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-02-07 04:54:41 +00:00
50309aa295 Use askama-escape for html escaping (#1953) 2021-02-07 04:50:23 +00:00
9eaea6a2fd tweak feature flags 2021-02-07 03:54:58 +00:00
830fb2cdb2 properly drop h2 connection (#1926) 2021-02-07 03:51:36 +00:00
7cfed73be8 fix memory usage for h1 and read bug on buffer size. (#1929) 2021-02-07 03:20:35 +00:00
41bc04b1c4 Use immutable reference of service state. Update awc dns resolver. (#1905) 2021-02-07 01:00:40 +00:00
20cf0094e5 fix master branch build. change web::block output type. (#1957) 2021-02-06 16:23:59 +00:00
83fb4978ad fix awc test_client test (#1960) 2021-02-06 16:05:33 +00:00
51e54dac8b fix limit not working on HttpMessageBody::limit (#1938) 2021-01-27 10:49:57 +00:00
c201c15f8c Improve documentation for PayloadConfig (#1923) 2021-01-24 00:32:10 +00:00
0c8196f8b0 Remove HttpResponseBuilder::json2() (#1903)
It's not necessary to keep both json() and json2() around since the
former reduces the ownership of its parameter to a borrow only to pass
the reference to the latter. Users can instead borrow themselves when
passing an owned value: there doesn't need to be two separate functions.

This change also makes HttpResponseBuilder::json() take T: Deref so it
can accept both references and web extractors like web::Json.
2021-01-18 12:14:29 +00:00
ee10148444 revive commented out tests (#1912) 2021-01-17 05:19:32 +00:00
1c95fc2654 Refactor poll_keepalive for readability (#1901) 2021-01-16 00:15:06 +00:00
da69bb4d12 implement App::data as App::app_data(Data::new(T))) (#1906) 2021-01-15 23:37:33 +00:00
0a506bf2e9 cleanup top level doc comments 2021-01-15 05:38:50 +00:00
b2a9ba2ee4 Update PULL_REQUEST_TEMPLATE.md 2021-01-15 04:54:23 +00:00
f976150b67 return option item from Extensions::insert (#1904) 2021-01-15 04:22:42 +00:00
b1dd8d28bc response header rework (#1869) 2021-01-15 02:11:10 +00:00
4edeb5ce47 optimize ErrorHandler middleware (#1902) 2021-01-14 01:43:44 +00:00
d34a8689e5 Refactor h1 encoder (#1900) 2021-01-12 14:38:53 +00:00
a919d2de56 actix-files: Fix If-(Un)Modified to not consider sub-seconds (#1887) 2021-01-11 18:18:23 +00:00
46a8f28b74 fix actix-files doc about thread pool (#1898) 2021-01-11 17:27:33 +00:00
57398c6df1 Refactor/service request (#1893) 2021-01-11 01:29:16 +00:00
7affc6878e simplify h1 dispatcher (#1899)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-01-11 00:13:56 +00:00
46b2f7eaaf use a non leak pool for HttpRequestInner (#1889)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-01-10 22:59:44 +00:00
9e401b6ef7 refactor Scope (#1895) 2021-01-09 18:06:49 +00:00
fe392abeb4 remove actix-threadpool.use actix_rt::task::spawn_blocking (#1878) 2021-01-09 16:04:19 +00:00
f6cc829758 remove leaked box in REQUEST_POOL and RESPONSE_POOL (#1896) 2021-01-09 15:40:20 +00:00
6575ee93f2 big clean up and docs improvmenet of types mod (#1894) 2021-01-09 13:17:19 +00:00
530d03791d refactor Resource (#1883) 2021-01-09 03:36:58 +00:00
d40ae8c8ca use sync method on Responder trait (#1891) 2021-01-08 22:17:19 +00:00
2204614134 don't run awc doctests that rely on external public endpoints (#1888) 2021-01-08 12:00:58 +00:00
188ee44f81 remove copyless dependency (#1884) 2021-01-07 21:55:00 +00:00
a4c9aaf337 fix extra branch in h1 dispatcher timer (#1882) 2021-01-07 20:42:09 +00:00
c09186a2c0 prepare v4 beta releases (#1881) 2021-01-07 20:02:08 +00:00
d3c476b8c2 use env_logger builders in examples 2021-01-07 02:41:05 +00:00
dc23559f23 address clippy lints 2021-01-07 02:04:26 +00:00
6d710629af fix bug where upgrade future is not reset properly (#1880) 2021-01-07 00:57:34 +00:00
85753130d9 fmt 2021-01-07 00:35:19 +00:00
00ba8d5549 add http3 variant to protocol enum 2021-01-06 18:58:24 +00:00
51e9e1500b add docs to recent additions 2021-01-06 18:52:06 +00:00
a03dbe2dcf replace cloneable service with httpflow abstraction (#1876) 2021-01-06 18:43:52 +00:00
57a3722146 More refactor of app_service (#1879) 2021-01-06 18:11:20 +00:00
57da1d3c0f refactor app_service (#1877) 2021-01-06 11:35:30 +00:00
68117543ea major cleanup of middleware module (#1875)
* major cleanup of middleware module

* update changelog
2021-01-05 09:51:58 +00:00
4f5971d79e add Compat middleware (#1865) 2021-01-05 00:22:57 +00:00
93161df141 clean up body type (#1872) 2021-01-04 23:47:38 +00:00
e567873326 optimize message pool release (#1871) 2021-01-04 13:03:46 +00:00
7d632d0b7b use ByteString as container for websocket text message (#1864) 2021-01-04 11:27:32 +00:00
36aee18c64 fmt 2021-01-04 04:33:15 +00:00
007a145988 use ahash for internal hashmaps 2021-01-04 04:29:07 +00:00
2d4a174420 fmt 2021-01-04 01:01:35 +00:00
21f6c9d7a5 improve code readability 2021-01-04 00:49:02 +00:00
e1683313ec optimize ServiceRequest methods (#1870)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-01-04 00:32:41 +00:00
32de9f8840 Tokio 1.0 (#1813)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2021-01-03 23:47:04 +00:00
1f202d40e4 optimize write_camel_case in h1 encoder (#1868) 2021-01-03 16:53:01 +00:00
ad608aa64e optimize Resource and Scope service call (#1867) 2021-01-02 19:40:31 +00:00
a1b00b2cd0 change unreleased year 2021-01-02 00:12:18 +00:00
3beb4cf2da replace tinyvec with smallvec (#1866) 2021-01-01 23:18:25 +00:00
522c9a5ea6 update CoC text 2020-12-31 03:24:18 +00:00
102bb8f9ab update dot dep graphs 2020-12-29 00:22:28 +00:00
20b46cdaf9 format factory_tuple macro invocations (#1859) 2020-12-28 21:04:02 +00:00
2a2a20c3e7 bump msrv to 1.46 (#1858) 2020-12-28 00:44:15 +00:00
093d3a6c59 remove deprecated on_connect methods (#1857) 2020-12-27 23:23:30 +00:00
8c9ea43e23 address clippy warnings 2020-12-27 20:54:04 +00:00
f9fcf56d5c reduce branch in actix_http::h1::codec (#1854) 2020-12-27 20:37:53 +00:00
cbda928a33 Rename factory to handler (#1852) 2020-12-26 21:46:19 +00:00
1032f04ded remove unused actix_http::h1::OneRequest (#1853) 2020-12-26 12:46:36 +00:00
b373e1370d prepare files 0.5.0 release 2020-12-26 04:05:45 +00:00
404b5a7709 Add optional support for hidden files/directories (#1811) 2020-12-26 03:36:15 +00:00
ecf08d5156 Remove boxed future from h1 Dispatcher (#1836) 2020-12-24 19:15:17 +00:00
87655b3028 reduce one clone on Arc. (#1850) 2020-12-23 23:58:25 +00:00
3a192400a6 Simplify handler (#1843) 2020-12-23 15:47:07 +00:00
2a7f2c1d59 dispatcher internals testing (#1840) 2020-12-23 01:28:17 +00:00
05f104c240 improve NormalizePath docs (#1839) 2020-12-23 00:19:20 +00:00
4dccd092f3 Bump rand from 0.7.x to 0.8.x (#1845) 2020-12-22 23:45:31 +00:00
95ccf1c9bc replace actix_utils::oneshot with futures_channle::oneshot (#1844) 2020-12-21 16:42:20 +00:00
6cbf27508a simplify ExtractService's return type (#1842) 2020-12-20 02:20:29 +00:00
79de04d862 optimise Extract service (#1841) 2020-12-19 16:33:34 +00:00
a4dbaa8ed1 remove boxed future in DefaultHeaders middleware (#1838) 2020-12-18 23:08:59 +00:00
c7b4c6edfa Disable PR comment from codecov 2020-12-17 21:38:52 +09:00
2a5215c1d6 Remove boxed future from HttpMessage (#1834) 2020-12-17 11:40:49 +00:00
97f615c245 remove boxed futures on Json extract type (#1832) 2020-12-16 23:34:33 +00:00
1a361273e7 optimize bytes and string payload extractors (#1831) 2020-12-16 22:40:26 +00:00
d7ce648445 remove boxed future for Option<T> and Result<T, E> extract type (#1829)
* remove boxed future for Option<T> and Result<T, E> extract type

* use ready macro

* fix fmt
2020-12-16 18:34:10 +00:00
fabc68659b Intradoc links conversion (#1827)
* switching to nightly for intra-doc links

* actix-files intra-doc conversion

* more specific Result

* intradoc conversion complete

* rm blank comments and readme doc link fixes

* macros and broken links
2020-12-13 13:28:39 +00:00
542db82282 Simplify wake up of task (#1826) 2020-12-12 20:07:06 +00:00
ae63eb8bb2 fix clippy warnings (#1806)
* fix clippy warnings

* prevent CI fail status caused by codecov
2020-12-09 11:22:19 +00:00
7a3776b770 remove two unused generics on BoxedRouteFuture types. (#1820) 2020-12-09 10:47:59 +00:00
ff79c33fd4 remove a box (#1814) 2020-12-06 11:42:15 +00:00
b75a9b7a20 add error to message in test helper func (#1812) 2020-12-05 04:57:56 +09:00
d0c6ca7671 test-server => actix-http-test (#1807) 2020-12-02 17:23:30 +00:00
24d525d978 prepare web 3.3.2 release 2020-12-01 22:22:46 +00:00
1f70ef155d Fix match_pattern() returning None for scope with resource of empty path (#1798)
* fix match_pattern function not returning pattern where scope has resource of path ""

* remove print in test

* make comparison on existing else if block

* add fix to changelog
2020-12-01 13:39:41 +00:00
7981e0068a Remove a panic in normalize middleware (#1762)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-12-01 10:22:15 +09:00
32d59ca904 Upgrade socket2 dependency (#1803)
Upgrades to a version not making invalid assumptions about
the memory layout of std::net::SocketAddr
2020-12-01 04:18:02 +09:00
ea8bf36104 update web and awc changelogs 2020-11-29 16:35:35 +00:00
0b5b463cfa prepare web and awc releases
closes #1799
2020-11-29 16:33:45 +00:00
fe6ad816cc update dotgraphs 2020-11-25 00:54:00 +00:00
e72b787ba7 prepare actix-web and actix-http-test releases 2020-11-25 00:53:48 +00:00
efc317d3b0 prepare actix-http and awc releases 2020-11-25 00:07:56 +00:00
31057becca prepare actix-files release 0.4.1 2020-11-24 20:33:23 +00:00
f1a9b45437 improve docs for Files::new 2020-11-24 20:23:09 +00:00
5af46775b8 refactor quality and use TryFrom instead of custom trait (#1797) 2020-11-24 11:37:05 +00:00
70f4747a23 add method for getting accept type preference (#1793) 2020-11-24 10:08:57 +00:00
2f11ef089b fix rustdoc uploads 2020-11-24 00:29:13 +00:00
4100c50c70 add either extractor (#1788) 2020-11-20 18:02:41 +00:00
a929209967 actix-files intra-doc migration (#1785) 2020-11-10 23:54:38 +00:00
49e945c88f switching to nightly for intra-doc links (#1783) 2020-11-09 14:01:36 +00:00
9b42333fac Fix typo in Query extractor docs (#1777) 2020-11-06 13:34:42 +00:00
e5b86d189c Fix typo in request_data.rs (#1774) 2020-11-05 17:46:17 +00:00
4bfd5c2781 Upgrade serde_urlencoded to 0.7 (#1773) 2020-11-06 01:36:15 +09:00
9b6a089b36 fix awc doc example (#1772)
* fix awc readme example

Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-11-05 06:20:01 +08:00
ceac97bb8d Update config.yml 2020-11-04 15:08:12 +00:00
61b65aa64a add common 1xx http response builders (#1768)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-11-02 18:23:18 +09:00
5468c3c410 Drop content length headers from 101 responses (#1767)
Co-authored-by: Sebastian Mayr <smayr@atlassian.com>
2020-11-02 17:44:14 +09:00
b6385c2b4e Remove CoC on actix-http as duplicated 2020-10-31 12:12:19 +09:00
5135c1e3a0 Update CoC contact information 2020-10-31 12:06:51 +09:00
22b451cf2d fix deps.rs badge 2020-10-31 02:39:54 +00:00
42f51eb962 prepare web release 3.2.0 2020-10-30 03:15:22 +00:00
156c97cef2 prepare awc release 2.0.1 2020-10-30 02:50:53 +00:00
798d744eef prepare http release 2.1.0 2020-10-30 02:19:56 +00:00
4cb833616a deprecate builder if-x methods (#1760) 2020-10-30 02:10:05 +00:00
9963a5ef54 expose on_connect v2 (#1754)
Co-authored-by: Mikail Bagishov <bagishov.mikail@yandex.ru>
2020-10-30 02:03:26 +00:00
4519db36b2 register fns for custom request-derived logging units (#1749)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-10-29 18:38:49 +00:00
7030bf5fe8 Adding app_data to ServiceConfig (#1758)
Co-authored-by: Rob Ede <robjtede@icloud.com>
Co-authored-by: Augusto <augusto@flowciety.de>
2020-10-26 17:02:45 +00:00
20078fe603 Bump pin-project to 1.0 (#1733) 2020-10-25 19:41:44 +09:00
06e5042b94 use idenity encoding on client if no compression features are enabled (#1737)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-10-24 21:15:01 +01:00
41e7cec72f Re-export bytes::Buf and bytes::BufMut as well (#1750)
Co-authored-by: Daniel Egger <daniel.egger@axiros.com>
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-10-24 20:31:23 +01:00
d45a1aa6b6 Add web::ReqData<T> extractor (#1748)
Co-authored-by: Jonas Platte <jonas@lumeo.com>
2020-10-24 18:49:50 +01:00
98243db9f1 Print unconfigured Data<T> type when attempting extraction (#1743)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-10-20 17:35:34 +01:00
f92742bdac Bump base64 to 0.13 (#1744) 2020-10-19 18:24:22 +01:00
e563025b16 always construct shortslice using debug checked new constructor (#1741) 2020-10-19 12:51:30 +01:00
cfd5b381f1 Implement Logger middleware regex exclude pattern (#1723)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-10-19 07:18:16 +01:00
2f84914146 Skip some tests that cause ICE on nightly (#1740) 2020-10-19 11:52:05 +09:00
d765e9099d Fix clippy::rc_buffer (#1728) 2020-10-10 09:26:05 +09:00
34b23f31c9 prepare files release 0.4.0 2020-10-06 22:08:33 +01:00
26c1a901d9 add files preference for utf8 text responses (#1714) 2020-10-06 21:56:28 +01:00
c2c71cc626 Fix/suppress clippy warnings (#1720) 2020-10-01 18:19:09 +09:00
aa11231ee5 prepare web release 3.1.0 (#1716) 2020-09-30 11:07:35 +01:00
b5812b15f0 Remove Sized Bound for web::Data (#1712) 2020-09-29 22:44:12 +01:00
b4e02fe29a Fix cyclic references in ResourceMap (#1708) 2020-09-25 17:42:49 +01:00
37c76a39ab Fix Multipart consuming payload before header checks (#1704)
* Fix Multipart consuming payload before header checks

What
--
Split up logic in the constructor into two functions:

- **from_boundary:** build Multipart from boundary and stream
- **from_error:** build Multipart for MultipartError

Also we make the `boundary`, `from_boundary`, `from_error`  methods public within the crate so that we can use them in the extractor.

The extractor is then able to perform header checks and only consume the
payload if the checks pass.

* Add tests

* Add payload consumption test

Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-09-25 14:50:37 +01:00
60e7e52276 Add TrailingSlash::MergeOnly behavior (#1695)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-09-25 12:50:59 +01:00
c53e9468bc prepare codegen 0.4.0 release (#1702) 2020-09-24 23:54:01 +01:00
162121bf8d Unify route macros (#1705) 2020-09-22 22:42:51 +01:00
f7bcad9567 split up files lib (#1685) 2020-09-20 23:18:25 +01:00
f9e3f78e45 eemove non-relevant comment from actix-http README.md (#1701) 2020-09-20 17:21:53 +01:00
1596893ef7 update actix-http dev-dependencies (#1696)
Co-authored-by: luojinming <luojm@hxsmart.com>
2020-09-19 23:20:34 +09:00
2a2474ca09 Update tinyvec to 1.0 (#1689) 2020-09-17 18:09:42 +01:00
509b2e6eec Provide attribute macro for multiple HTTP methods (#1674)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-09-16 22:37:41 +01:00
d707704556 prepare web release 3.0.2 (#1681) 2020-09-15 13:14:14 +01:00
a429ee6646 Add possibility to set address for test_server (#1645) 2020-09-15 12:09:16 +01:00
7f8073233a fix trimming to inaccessible root path (#1678) 2020-09-15 11:32:31 +01:00
4b4c9d1b93 update migration guide
closes #1680
2020-09-14 22:26:03 +01:00
3fde3be3d8 add trybuild tests to routing codegen (#1677) 2020-09-13 16:31:08 +01:00
f861508789 prepare web release 3.0.1 (#1676) 2020-09-13 03:24:44 +01:00
a4546f02d2 make TrailingSlash enum accessible (#1673)
Co-authored-by: Damian Lesiuk <lesiuk@sabre.com>
2020-09-13 00:55:39 +01:00
64a2c13cdf the big three point oh (#1668) 2020-09-11 13:50:10 +01:00
bf53fe5a22 bump actix dependency to v0.10 (#1666) 2020-09-11 12:09:52 +01:00
cf5138e740 fix clippy async_yields_async lints (#1667) 2020-09-11 11:29:17 +01:00
121075c1ef awc: Rename Client::build to Client::builder (#1665) 2020-09-11 09:24:39 +01:00
22089aff87 Improve json, form and query extractor config docs (#1661) 2020-09-10 15:40:20 +01:00
7787638f26 fix CI clippy warnings (#1664) 2020-09-10 14:46:35 +01:00
2f6e9738c4 prepare multipart and actors releases (#1663) 2020-09-10 12:54:27 +01:00
e39d166a17 Fix examples hyperlink in README (#1660) 2020-09-10 00:12:50 +01:00
059d1671d7 prepare release beta 4 (#1659) 2020-09-09 22:14:11 +01:00
3a27580ebe awc: improve module documentation (#1656)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-09-09 14:24:12 +01:00
9d0534999d bump connect and tls versions (#1655) 2020-09-09 09:20:54 +01:00
c54d73e0bb Improve awc websocket docs (#1654)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-09-07 12:04:54 +01:00
9a9d4b182e document all remaining unsafe usages (#1642)
adds some debug assertions where appropriate
2020-09-03 10:00:24 +01:00
4e321595bc extract more config types from Data<T> as well (#1641) 2020-09-02 22:12:07 +01:00
01cbef700f Fix a small typo in a doc comment. (#1649) 2020-08-28 22:16:41 +01:00
8497b5f490 integrate with updated actix-{codec, utils} (#1634) 2020-08-24 10:13:35 +01:00
75d86a6beb Configurable trailing slash behaviour for NormalizePath (#1639)
Co-authored-by: ljoonal <ljoona@ljoonal.xyz>
2020-08-19 12:21:52 +01:00
3892a95c11 Fix actix-web version to publish 2020-08-18 01:16:18 +09:00
5802eb797f awc,web: Bump up to next beta releases (#1638) 2020-08-18 01:08:40 +09:00
ff2ca0f420 Update rustls to 0.18 (#1637) 2020-08-18 00:28:39 +09:00
59ad1738e9 web: Bump up to 3.0.0-beta.2 (#1636) 2020-08-17 11:32:38 +01:00
aa2bd6fbfb http: Bump up to 2.0.0-beta.3 (#1630) 2020-08-14 19:42:14 +09:00
5aad8e24c7 Re-export all error types from awc (#1621) 2020-08-14 01:24:35 +01:00
6e97bc09f8 Use action to upload docs 2020-08-13 16:04:50 +09:00
160995b8d4 fix awc pool leak (#1626) 2020-08-09 21:49:43 +01:00
187646b2f9 match HttpRequest app_data behavior in ServiceRequest (#1618) 2020-08-09 15:51:38 +01:00
46627be36f add dep graph dot graphs (#1601) 2020-08-09 13:54:35 +01:00
a78380739e require rustls feature for client example (#1625) 2020-08-09 13:32:37 +01:00
cf1c8abe62 prepare release http & awc (#1617) 2020-07-22 01:13:10 +01:00
92b5bcd13f Check format and tweak CI config (#1619) 2020-07-22 00:28:33 +01:00
701bdacfa2 Fix illegal chunked encoding (#1615)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-07-21 17:24:56 +01:00
6dc47c4093 fix soundness concern in h1 decoder (#1614)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-07-21 16:25:33 +01:00
0ec335a39c bump MSRV to 1.42 (#1616) 2020-07-21 16:40:30 +09:00
f8d5ad6b53 Make web::Path a tuple struct with a public inner value (#1594)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-07-21 00:54:26 +01:00
43c362779d also try extracting payload config as Data<T> (#1610) 2020-07-20 17:40:58 +01:00
971ba3eee1 fix continous growth of app data in pooled requests (#1609)
fixes #1606
fixes #1607
2020-07-18 16:17:00 +01:00
238 changed files with 13623 additions and 9921 deletions

View File

@@ -1,8 +1,15 @@
 blank_issues_enabled: true
 contact_links:
-  - name: Gitter channel (actix-web)
+  - name: GitHub Discussions
+    url: https://github.com/actix/actix-web/discussions
+    about: Actix Web Q&A
+  - name: Gitter chat (actix-web)
     url: https://gitter.im/actix/actix-web
-    about: Please ask and answer questions about the actix-web here.
-  - name: Gitter channel (actix)
+    about: Actix Web Q&A
+  - name: Gitter chat (actix)
     url: https://gitter.im/actix/actix
-    about: Please ask and answer questions about the actix here.
+    about: Actix (actor framework) Q&A
+  - name: Actix Discord
+    url: https://discord.gg/NWpN5mmg3x
+    about: Actix developer discussion and community chat

View File

@@ -1,18 +1,21 @@
-## PR Type
-What kind of change does this PR make?
+<!-- Thanks for considering contributing actix! -->
+<!-- Please fill out the following to get your PR reviewed quicker. -->
+
+## PR Type
+<!-- What kind of change does this PR make? -->
 <!-- Bug Fix / Feature / Refactor / Code Style / Other -->
-INSERT_PR_TYPE
+PR_TYPE

 ## PR Checklist
-Check your PR fulfills the following:
+<!-- Check your PR fulfills the following items. ->>
 <!-- For draft PRs check the boxes as you complete them. -->
 - [ ] Tests for the changes have been added / updated.
 - [ ] Documentation comments have been added / updated.
 - [ ] A changelog entry has been made for the appropriate packages.
+- [ ] Format code with the latest stable rustfmt.
+- [ ] (Team) Label with affected crates and semver status.

 ## Overview

View File

@@ -1,13 +1,18 @@
-name: Benchmark (Linux)
+name: Benchmark

-on: [push, pull_request]
+on:
+  pull_request:
+    types: [opened, synchronize, reopened]
+  push:
+    branches:
+      - master

 jobs:
   check_benchmark:
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@master
+      - uses: actions/checkout@v2
       - name: Install Rust
         uses: actions-rs/toolchain@v1

.github/workflows/clippy-fmt.yml (vendored, new file, 39 lines)
View File

@@ -0,0 +1,39 @@
+name: Lint
+
+on:
+  pull_request:
+    types: [opened, synchronize, reopened]
+
+jobs:
+  fmt:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          components: rustfmt
+      - name: Check with rustfmt
+        uses: actions-rs/cargo@v1
+        with:
+          command: fmt
+          args: --all -- --check
+
+  clippy:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v2
+      - name: Install Rust
+        uses: actions-rs/toolchain@v1
+        with:
+          toolchain: stable
+          components: clippy
+          override: true
+      - name: Check with Clippy
+        uses: actions-rs/clippy-check@v1
+        with:
+          token: ${{ secrets.GITHUB_TOKEN }}
+          args: --workspace --tests --all-features

View File

@@ -1,6 +1,11 @@
 name: CI (Linux)

-on: [push, pull_request]
+on:
+  pull_request:
+    types: [opened, synchronize, reopened]
+  push:
+    branches:
+      - master

 jobs:
   build_and_test:
@@ -8,7 +13,7 @@ jobs:
       fail-fast: false
       matrix:
         version:
-          - 1.41.1 # MSRV
+          - 1.46.0 # MSRV
           - stable
           - nightly
@@ -16,7 +21,7 @@
     runs-on: ubuntu-latest
     steps:
-      - uses: actions/checkout@master
+      - uses: actions/checkout@v2
       - name: Install ${{ matrix.version }}
         uses: actions-rs/toolchain@v1
@@ -25,6 +30,13 @@
           profile: minimal
           override: true
+      - name: Generate Cargo.lock
+        uses: actions-rs/cargo@v1
+        with:
+          command: generate-lockfile
+      - name: Cache Dependencies
+        uses: Swatinem/rust-cache@v1.0.1
       - name: check build
         uses: actions-rs/cargo@v1
         with:
@@ -53,12 +65,17 @@
           args: --package=awc --no-default-features --features=rustls -- --nocapture
       - name: Generate coverage file
-        if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
+        if: matrix.version == 'stable' && github.ref == 'refs/heads/master'
         run: |
           cargo install cargo-tarpaulin --vers "^0.13"
           cargo tarpaulin --out Xml
       - name: Upload to Codecov
-        if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
+        if: matrix.version == 'stable' && github.ref == 'refs/heads/master'
         uses: codecov/codecov-action@v1
         with:
           file: cobertura.xml
+      - name: Clear the cargo caches
+        run: |
+          cargo install cargo-cache --no-default-features --features ci-autoclean
+          cargo-cache

View File

@@ -1,6 +1,11 @@
 name: CI (macOS)

-on: [push, pull_request]
+on:
+  pull_request:
+    types: [opened, synchronize, reopened]
+  push:
+    branches:
+      - master

 jobs:
   build_and_test:
@@ -15,7 +20,7 @@
     runs-on: macOS-latest
     steps:
-      - uses: actions/checkout@master
+      - uses: actions/checkout@v2
       - name: Install ${{ matrix.version }}
         uses: actions-rs/toolchain@v1
@@ -24,6 +29,13 @@
           profile: minimal
           override: true
+      - name: Generate Cargo.lock
+        uses: actions-rs/cargo@v1
+        with:
+          command: generate-lockfile
+      - name: Cache Dependencies
+        uses: Swatinem/rust-cache@v1.0.1
       - name: check build
         uses: actions-rs/cargo@v1
         with:
@@ -37,3 +49,8 @@
           args: --all --all-features --no-fail-fast -- --nocapture
             --skip=test_h2_content_length
             --skip=test_reading_deflate_encoding_large_random_rustls
+      - name: Clear the cargo caches
+        run: |
+          cargo install cargo-cache --no-default-features --features ci-autoclean
+          cargo-cache

View File

@@ -11,12 +11,12 @@
     if: github.repository == 'actix/actix-web'
     steps:
-      - uses: actions/checkout@master
+      - uses: actions/checkout@v2
       - name: Install Rust
         uses: actions-rs/toolchain@v1
         with:
-          toolchain: stable-x86_64-unknown-linux-gnu
+          toolchain: nightly-x86_64-unknown-linux-gnu
           profile: minimal
           override: true
@@ -24,12 +24,14 @@
         uses: actions-rs/cargo@v1
         with:
           command: doc
-          args: --no-deps --workspace --all-features
+          args: --workspace --all-features --no-deps
       - name: Tweak HTML
         run: echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html
-      - name: Upload documentation
-        run: |
-          git clone https://github.com/davisp/ghp-import.git
-          ./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://${{ secrets.GITHUB_TOKEN }}@github.com/"${{ github.repository }}.git" target/doc
+      - name: Deploy to GitHub Pages
+        uses: JamesIves/github-pages-deploy-action@3.7.1
+        with:
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+          BRANCH: gh-pages
+          FOLDER: target/doc

View File

@@ -1,6 +1,11 @@
 name: CI (Windows)

-on: [push, pull_request]
+on:
+  pull_request:
+    types: [opened, synchronize, reopened]
+  push:
+    branches:
+      - master

 env:
   VCPKGRS_DYNAMIC: 1
@@ -18,7 +23,7 @@
     runs-on: windows-latest
     steps:
-      - uses: actions/checkout@master
+      - uses: actions/checkout@v2
       - name: Install ${{ matrix.version }}
         uses: actions-rs/toolchain@v1
@@ -36,6 +41,13 @@
           Get-ChildItem C:\vcpkg\installed\x64-windows\bin
           Get-ChildItem C:\vcpkg\installed\x64-windows\lib
+      - name: Generate Cargo.lock
+        uses: actions-rs/cargo@v1
+        with:
+          command: generate-lockfile
+      - name: Cache Dependencies
+        uses: Swatinem/rust-cache@v1.0.1
       - name: check build
         uses: actions-rs/cargo@v1
         with:
@@ -57,3 +69,8 @@
             --skip=test_connection_force_close
             --skip=test_connection_server_close
             --skip=test_connection_wait_queue_force_close
+      - name: Clear the cargo caches
+        run: |
+          cargo install cargo-cache --no-default-features --features ci-autoclean
+          cargo-cache

.gitignore (vendored, 4 changed lines)
View File

@@ -9,6 +9,10 @@ guide/build/
 *.pid
 *.sock
 *~
+.DS_Store

 # These are backup files generated by rustfmt
 **/*.rs.bk
+
+# Configuration directory generated by CLion
+.idea

View File

@@ -1,6 +1,202 @@
 # Changes

-## Unreleased - 2020-xx-xx
+## Unreleased - 2021-xx-xx
## 4.0.0-beta.2 - 2021-xx-xx
### Added
* The method `Either<web::Json<T>, web::Form<T>>::into_inner()` which returns the inner type for
whichever variant was created. Also works for `Either<web::Form<T>, web::Json<T>>`. [#1894]
* Add `services!` macro for helping register multiple services to `App`. [#1933]
* Enable registering a vec of services of the same type to `App` [#1933]
### Changed
* Rework `Responder` trait to be sync and returns `Response`/`HttpResponse` directly.
Making it simpler and more performant. [#1891]
* `ServiceRequest::into_parts` and `ServiceRequest::from_parts` can no longer fail. [#1893]
* `ServiceRequest::from_request` can no longer fail. [#1893]
* Our `Either` type now uses `Left`/`Right` variants (instead of `A`/`B`) [#1894]
* `test::{call_service, read_response, read_response_json, send_request}` take `&Service`
in argument [#1905]
* `App::wrap_fn`, `Resource::wrap_fn` and `Scope::wrap_fn` provide `&Service` in closure
argument. [#1905]
* `web::block` no longer requires the output is a Result. [#1957]
### Fixed
* Multiple calls to `App::data` with the same type now keeps the latest call's data. [#1906]
### Removed
* Public field of `web::Path` has been made private. [#1894]
* Public field of `web::Query` has been made private. [#1894]
* `TestRequest::with_header`; use `TestRequest::default().insert_header()`. [#1869]
* `AppService::set_service_data`; for custom HTTP service factories adding application data, use the
layered data model by calling `ServiceRequest::add_data_container` when handling
requests instead. [#1906]
[#1891]: https://github.com/actix/actix-web/pull/1891
[#1893]: https://github.com/actix/actix-web/pull/1893
[#1894]: https://github.com/actix/actix-web/pull/1894
[#1869]: https://github.com/actix/actix-web/pull/1869
[#1905]: https://github.com/actix/actix-web/pull/1905
[#1906]: https://github.com/actix/actix-web/pull/1906
[#1933]: https://github.com/actix/actix-web/pull/1933
[#1957]: https://github.com/actix/actix-web/pull/1957
## 4.0.0-beta.1 - 2021-01-07
### Added
* `Compat` middleware enabling generic response body/error type of middlewares like `Logger` and
`Compress` to be used in `middleware::Condition` and `Resource`, `Scope` services. [#1865]
### Changed
* Update `actix-*` dependencies to tokio `1.0` based versions. [#1813]
* Bumped `rand` to `0.8`.
* Update `rust-tls` to `0.19`. [#1813]
* Rename `Handler` to `HandlerService` and rename `Factory` to `Handler`. [#1852]
* The default `TrailingSlash` is now `Trim`, in line with existing documentation. See migration
guide for implications. [#1875]
* Rename `DefaultHeaders::{content_type => add_content_type}`. [#1875]
* MSRV is now 1.46.0.
### Fixed
* Added the underlying parse error to `test::read_body_json`'s panic message. [#1812]
### Removed
* Public modules `middleware::{normalize, err_handlers}`. All necessary middleware structs are now
exposed directly by the `middleware` module.
* Remove `actix-threadpool` as dependency. `actix_threadpool::BlockingError` error type can be imported
from `actix_web::error` module. [#1878]
[#1812]: https://github.com/actix/actix-web/pull/1812
[#1813]: https://github.com/actix/actix-web/pull/1813
[#1852]: https://github.com/actix/actix-web/pull/1852
[#1865]: https://github.com/actix/actix-web/pull/1865
[#1875]: https://github.com/actix/actix-web/pull/1875
[#1878]: https://github.com/actix/actix-web/pull/1878
## 3.3.2 - 2020-12-01
### Fixed
* Removed an occasional `unwrap` on `None` panic in `NormalizePathNormalization`. [#1762]
* Fix `match_pattern()` returning `None` for scope with empty path resource. [#1798]
* Increase minimum `socket2` version. [#1803]
[#1762]: https://github.com/actix/actix-web/pull/1762
[#1798]: https://github.com/actix/actix-web/pull/1798
[#1803]: https://github.com/actix/actix-web/pull/1803
## 3.3.1 - 2020-11-29
* Ensure `actix-http` dependency uses same `serde_urlencoded`.
## 3.3.0 - 2020-11-25
### Added
* Add `Either<A, B>` extractor helper. [#1788]
### Changed
* Upgrade `serde_urlencoded` to `0.7`. [#1773]
[#1773]: https://github.com/actix/actix-web/pull/1773
[#1788]: https://github.com/actix/actix-web/pull/1788
## 3.2.0 - 2020-10-30
### Added
* Implement `exclude_regex` for Logger middleware. [#1723]
* Add request-local data extractor `web::ReqData`. [#1748]
* Add ability to register closure for request middleware logging. [#1749]
* Add `app_data` to `ServiceConfig`. [#1757]
* Expose `on_connect` for access to the connection stream before request is handled. [#1754]
### Changed
* Updated actix-web-codegen dependency for access to new `#[route(...)]` multi-method macro.
* Print non-configured `Data<T>` type when attempting extraction. [#1743]
* Re-export bytes::Buf{Mut} in web module. [#1750]
* Upgrade `pin-project` to `1.0`.
[#1723]: https://github.com/actix/actix-web/pull/1723
[#1743]: https://github.com/actix/actix-web/pull/1743
[#1748]: https://github.com/actix/actix-web/pull/1748
[#1750]: https://github.com/actix/actix-web/pull/1750
[#1754]: https://github.com/actix/actix-web/pull/1754
[#1749]: https://github.com/actix/actix-web/pull/1749
## 3.1.0 - 2020-09-29
### Changed
* Add `TrailingSlash::MergeOnly` behaviour to `NormalizePath`, which allows `NormalizePath`
to retain any trailing slashes. [#1695]
* Remove bound `std::marker::Sized` from `web::Data` to support storing `Arc<dyn Trait>`
via `web::Data::from` [#1710]
### Fixed
* `ResourceMap` debug printing is no longer infinitely recursive. [#1708]
[#1695]: https://github.com/actix/actix-web/pull/1695
[#1708]: https://github.com/actix/actix-web/pull/1708
[#1710]: https://github.com/actix/actix-web/pull/1710
## 3.0.2 - 2020-09-15
### Fixed
* `NormalizePath` when used with `TrailingSlash::Trim` no longer trims the root path "/". [#1678]
[#1678]: https://github.com/actix/actix-web/pull/1678
## 3.0.1 - 2020-09-13
### Changed
* `middleware::normalize::TrailingSlash` enum is now accessible. [#1673]
[#1673]: https://github.com/actix/actix-web/pull/1673
## 3.0.0 - 2020-09-11
* No significant changes from `3.0.0-beta.4`.
## 3.0.0-beta.4 - 2020-09-09
### Added
* `middleware::NormalizePath` now has configurable behaviour for either always having a trailing
slash, or as the new addition, always trimming trailing slashes. [#1639]
### Changed
* Update actix-codec and actix-utils dependencies. [#1634]
* `FormConfig` and `JsonConfig` configurations are now also considered when set
using `App::data`. [#1641]
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`. [#1655]
* `HttpServer::maxconnrate` is renamed to the more expressive
`HttpServer::max_connection_rate`. [#1655]
[#1639]: https://github.com/actix/actix-web/pull/1639
[#1641]: https://github.com/actix/actix-web/pull/1641
[#1634]: https://github.com/actix/actix-web/pull/1634
[#1655]: https://github.com/actix/actix-web/pull/1655
## 3.0.0-beta.3 - 2020-08-17
### Changed
* Update `rustls` to 0.18
## 3.0.0-beta.2 - 2020-08-17
### Changed
* `PayloadConfig` is now also considered in `Bytes` and `String` extractors when set
using `App::data`. [#1610]
* `web::Path` now has a public representation: `web::Path(pub T)` that enables
destructuring. [#1594]
* `ServiceRequest::app_data` allows retrieval of non-Data data without splitting into parts to
access `HttpRequest` which already allows this. [#1618]
* Re-export all error types from `awc`. [#1621]
* MSRV is now 1.42.0.
### Fixed
* Memory leak of app data in pooled requests. [#1609]
[#1594]: https://github.com/actix/actix-web/pull/1594
[#1609]: https://github.com/actix/actix-web/pull/1609
[#1610]: https://github.com/actix/actix-web/pull/1610
[#1618]: https://github.com/actix/actix-web/pull/1618
[#1621]: https://github.com/actix/actix-web/pull/1621
## 3.0.0-beta.1 - 2020-07-13
@@ -109,7 +305,7 @@
### Deleted
-* Delete HttpServer::run(), it is not useful witht async/await
+* Delete HttpServer::run(), it is not useful with async/await
## [2.0.0-alpha.3] - 2019-12-07
@@ -154,7 +350,7 @@
### Changed
-* Make UrlEncodedError::Overflow more informativve
+* Make UrlEncodedError::Overflow more informative
* Use actix-testing for testing utils
@@ -172,7 +368,7 @@
* Re-implement Host predicate (#989)
-* Form immplements Responder, returning a `application/x-www-form-urlencoded` response
+* Form implements Responder, returning a `application/x-www-form-urlencoded` response
* Add `into_inner` to `Data`
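
To make the 4.0.0-beta.2 entries above concrete, here is a minimal sketch (not taken from the repository) of the reworked `web::block` and the new `Either::into_inner()`. It assumes actix-web 4.0.0-beta.2 and serde's derive feature; the handler names, routes, and the `User` struct are illustrative only.

```rust
use actix_web::{web, App, Either, HttpServer, Responder};

#[derive(serde::Deserialize)]
struct User {
    name: String,
}

// `web::block` no longer requires the closure to return a `Result` (#1957);
// awaiting it yields `Result<T, BlockingError>`.
async fn sum() -> impl Responder {
    let total = web::block(|| (1..=100u32).sum::<u32>())
        .await
        .expect("blocking task failed");
    format!("sum = {}", total)
}

// `Either<web::Json<T>, web::Form<T>>::into_inner()` returns the inner `T`
// for whichever variant was extracted (#1894).
async fn create_user(payload: Either<web::Json<User>, web::Form<User>>) -> impl Responder {
    format!("created {}", payload.into_inner().name)
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            .route("/sum", web::get().to(sum))
            .route("/users", web::post().to(create_user))
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}
```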

View File

@@ -34,10 +34,13 @@ This Code of Conduct applies both within project spaces and in public spaces whe
 ## Enforcement
-Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at robjtede@icloud.com ([@robjtede]) or huyuumi@neet.club ([@JohnTitor]). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
 Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+[@robjtede]: https://github.com/robjtede
+[@JohnTitor]: https://github.com/JohnTitor
 ## Attribution
 This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

View File

@@ -1,8 +1,8 @@
 [package]
 name = "actix-web"
-version = "3.0.0-beta.1"
+version = "4.0.0-beta.2"
 authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
-description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust."
+description = "Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust"
 readme = "README.md"
 keywords = ["actix", "http", "web", "framework", "async"]
 homepage = "https://actix.rs"
@@ -34,7 +34,7 @@ members = [
   "actix-multipart",
   "actix-web-actors",
   "actix-web-codegen",
-  "test-server",
+  "actix-http-test",
 ]

 [features]
@@ -47,10 +47,10 @@ compress = ["actix-http/compress", "awc/compress"]
 secure-cookies = ["actix-http/secure-cookies"]

 # openssl
-openssl = ["actix-tls/openssl", "awc/openssl", "open-ssl"]
+openssl = ["tls_openssl", "actix-tls/accept", "actix-tls/openssl", "awc/openssl"]

 # rustls
-rustls = ["actix-tls/rustls", "awc/rustls", "rust-tls"]
+rustls = ["tls_rustls", "actix-tls/accept", "actix-tls/rustls", "awc/rustls"]

 [[example]]
 name = "basic"
@@ -64,47 +64,53 @@ required-features = ["compress"]
 name = "test_server"
 required-features = ["compress"]

+[[example]]
+name = "on_connect"
+required-features = []
+
+[[example]]
+name = "client"
+required-features = ["rustls"]
+
 [dependencies]
-actix-codec = "0.2.0"
-actix-service = "1.0.2"
-actix-utils = "1.0.6"
-actix-router = "0.2.4"
-actix-rt = "1.1.1"
-actix-server = "1.0.0"
-actix-testing = "1.0.0"
-actix-macros = "0.1.0"
-actix-threadpool = "0.3.1"
-actix-tls = "2.0.0-alpha.1"
+actix-codec = "0.4.0-beta.1"
+actix-macros = "0.2.0"
+actix-router = "0.2.7"
+actix-rt = "2"
+actix-server = "2.0.0-beta.3"
+actix-service = "2.0.0-beta.4"
+actix-utils = "3.0.0-beta.2"
+actix-tls = { version = "3.0.0-beta.3", default-features = false, optional = true }

-actix-web-codegen = "0.3.0-beta.1"
-actix-http = "2.0.0-alpha.4"
-awc = { version = "2.0.0-beta.1", default-features = false }
+actix-web-codegen = "0.4.0"
+actix-http = "3.0.0-beta.2"
+awc = { version = "3.0.0-beta.2", default-features = false }

-bytes = "0.5.3"
-derive_more = "0.99.2"
+ahash = "0.7"
+bytes = "1"
+derive_more = "0.99.5"
+either = "1.5.3"
 encoding_rs = "0.8"
-futures-channel = { version = "0.3.5", default-features = false }
-futures-core = { version = "0.3.5", default-features = false }
-futures-util = { version = "0.3.5", default-features = false }
-fxhash = "0.2.1"
+futures-core = { version = "0.3.7", default-features = false }
+futures-util = { version = "0.3.7", default-features = false }
 log = "0.4"
 mime = "0.3"
-socket2 = "0.3"
-pin-project = "0.4.17"
-regex = "1.3"
-serde = { version = "1.0", features=["derive"] }
+socket2 = "0.3.16"
+pin-project = "1.0.0"
+regex = "1.4"
+serde = { version = "1.0", features = ["derive"] }
 serde_json = "1.0"
-serde_urlencoded = "0.6.1"
-time = { version = "0.2.7", default-features = false, features = ["std"] }
+serde_urlencoded = "0.7"
+time = { version = "0.2.23", default-features = false, features = ["std"] }
 url = "2.1"
-open-ssl = { package = "openssl", version = "0.10", optional = true }
-rust-tls = { package = "rustls", version = "0.17.0", optional = true }
-tinyvec = { version = "0.3", features = ["alloc"] }
+tls_openssl = { package = "openssl", version = "0.10.9", optional = true }
+tls_rustls = { package = "rustls", version = "0.19.0", optional = true }
+smallvec = "1.6"

 [dev-dependencies]
-actix = "0.10.0-alpha.1"
-rand = "0.7"
-env_logger = "0.7"
+actix = { version = "0.11.0-beta.2", default-features = false }
+rand = "0.8"
+env_logger = "0.8"
 serde_derive = "1.0"
 brotli2 = "0.3.2"
 flate2 = "1.0.13"
@@ -118,10 +124,11 @@ codegen-units = 1
 [patch.crates-io]
 actix-web = { path = "." }
 actix-http = { path = "actix-http" }
-actix-http-test = { path = "test-server" }
+actix-http-test = { path = "actix-http-test" }
+actix-web-actors = { path = "actix-web-actors" }
 actix-web-codegen = { path = "actix-web-codegen" }
-actix-files = { path = "actix-files" }
 actix-multipart = { path = "actix-multipart" }
+actix-files = { path = "actix-files" }
 awc = { path = "awc" }

 [[bench]]
@@ -131,3 +138,7 @@ harness = false

 [[bench]]
 name = "service"
 harness = false
+
+[[bench]]
+name = "responder"
+harness = false

View File

@@ -186,7 +186,7 @@
 same "printed page" as the copyright notice for easier
 identification within third-party archives.

-Copyright 2017-NOW Nikolay Kim
+Copyright 2017-NOW Actix Team

 Licensed under the Apache License, Version 2.0 (the "License");
 you may not use this file except in compliance with the License.

View File

@@ -1,4 +1,4 @@
-Copyright (c) 2017 Nikolay Kim
+Copyright (c) 2017 Actix Team

 Permission is hereby granted, free of charge, to any
 person obtaining a copy of this software and associated

View File

@@ -1,17 +1,70 @@
 ## Unreleased
* The default `NormalizePath` behavior now strips trailing slashes by default. This was
previously documented to be the case in v3 but the behavior now matches. The effect is that
routes defined with trailing slashes will become inaccessible when
using `NormalizePath::default()`.
Before: `#[get("/test/")]`
After: `#[get("/test")]`
Alternatively, explicitly require trailing slashes: `NormalizePath::new(TrailingSlash::Always)`.
## 3.0.0
* The return type for `ServiceRequest::app_data::<T>()` was changed from returning a `Data<T>` to
simply a `T`. To access a `Data<T>` use `ServiceRequest::app_data::<Data<T>>()`.
* Cookie handling has been offloaded to the `cookie` crate:
* `USERINFO_ENCODE_SET` is no longer exposed. Percent-encoding is still supported; check docs.
* Some types now require lifetime parameters.
* The time crate was updated to `v0.2`, a major breaking change to the time crate, which affects
any `actix-web` method previously expecting a time v0.1 input.
* Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now
result in `SameSite=None` being sent with the response Set-Cookie header.
To create a cookie without a SameSite attribute, remove any calls setting same_site.
* actix-http support for Actors messages was moved to actix-http crate and is enabled
with feature `actors`
* content_length function is removed from actix-http.
You can set Content-Length by normally setting the response body or calling no_chunking function.
* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
`u64` instead of a `usize`.
* Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`s elements now needs to use
destructuring or `.into_inner()`. For example:
```rust
// Previously:
async fn some_route(path: web::Path<(String, String)>) -> String {
format!("Hello, {} {}", path.0, path.1)
}
// Now (this also worked before):
async fn some_route(path: web::Path<(String, String)>) -> String {
let (first_name, last_name) = path.into_inner();
format!("Hello, {} {}", first_name, last_name)
}
// Or (this wasn't previously supported):
async fn some_route(web::Path((first_name, last_name)): web::Path<(String, String)>) -> String {
format!("Hello, {} {}", first_name, last_name)
}
```
* `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one.
It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`,
or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::new(TrailingSlash::MergeOnly))`.
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`.
* `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
## 2.0.0
* `HttpServer::start()` renamed to `HttpServer::run()`. It also possible to
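
A short, hedged sketch (not part of MIGRATION.md) of the trailing-slash advice above. It assumes the v4 betas, where `TrailingSlash` is re-exported from `actix_web::middleware` (in v3 it lives at `middleware::normalize::TrailingSlash`); the route and bind address are placeholders.

```rust
use actix_web::middleware::{NormalizePath, TrailingSlash};
use actix_web::{web, App, HttpResponse, HttpServer};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // `NormalizePath::default()` now trims trailing slashes; request the
            // old append-a-slash behaviour explicitly if routes rely on it.
            .wrap(NormalizePath::new(TrailingSlash::Always))
            .route("/test", web::get().to(|| async { HttpResponse::Ok().finish() }))
    })
    .bind(("127.0.0.1", 8080))?
    .run()
    .await
}
```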

View File

@@ -1,19 +1,20 @@
 <div align="center">
-  <h1>Actix web</h1>
+  <h1>Actix Web</h1>
   <p>
-    <strong>Actix web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
+    <strong>Actix Web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
   </p>
   <p>
-[![crates.io](https://meritbadge.herokuapp.com/actix-web)](https://crates.io/crates/actix-web)
+[![crates.io](https://img.shields.io/crates/v/actix-web?label=latest)](https://crates.io/crates/actix-web)
-[![Documentation](https://docs.rs/actix-web/badge.svg)](https://docs.rs/actix-web)
+[![Documentation](https://docs.rs/actix-web/badge.svg?version=4.0.0-beta.2)](https://docs.rs/actix-web/4.0.0-beta.2)
-[![Version](https://img.shields.io/badge/rustc-1.41+-lightgray.svg)](https://blog.rust-lang.org/2020/02/27/Rust-1.41.1.html)
+[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
-![License](https://img.shields.io/crates/l/actix-web.svg)
+![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-web.svg)
+[![Dependency Status](https://deps.rs/crate/actix-web/4.0.0-beta.2/status.svg)](https://deps.rs/crate/actix-web/4.0.0-beta.2)
 <br />
-[![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web)
+[![build status](https://github.com/actix/actix-web/workflows/CI%20%28Linux%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-web/actions)
 [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web)
-[![Download](https://img.shields.io/crates/d/actix-web.svg)](https://crates.io/crates/actix-web)
+![downloads](https://img.shields.io/crates/d/actix-web.svg)
-[![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
+[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
   </p>
 </div>

@@ -32,22 +33,17 @@
 * Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
 * Includes an async [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
 * Supports [Actix actor framework](https://github.com/actix/actix)
-* Runs on stable Rust 1.41+
+* Runs on stable Rust 1.46+

 ## Documentation

 * [Website & User Guide](https://actix.rs)
-* [Examples Repository](https://actix.rs/actix-web/actix_web)
+* [Examples Repository](https://github.com/actix/examples)
 * [API Documentation](https://docs.rs/actix-web)
 * [API Documentation (master branch)](https://actix.rs/actix-web/actix_web)

 ## Example

-<h2>
-WARNING: This example is for the master branch which is currently in beta stages for v3. For
-Actix web v2 see the <a href="https://actix.rs/docs/getting-started/">getting started guide</a>.
-</h2>

 Dependencies:
@@ -61,8 +57,8 @@ Code:
 use actix_web::{get, web, App, HttpServer, Responder};

 #[get("/{id}/{name}/index.html")]
-async fn index(info: web::Path<(u32, String)>) -> impl Responder {
-    format!("Hello {}! id:{}", info.1, info.0)
+async fn index(web::Path((id, name)): web::Path<(u32, String)>) -> impl Responder {
+    format!("Hello {}! id:{}", name, id)
 }

 #[actix_web::main]
@@ -102,13 +98,13 @@ One of the fastest web frameworks available according to the
 This project is licensed under either of

 * Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
-  [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
+  [http://www.apache.org/licenses/LICENSE-2.0])
 * MIT license ([LICENSE-MIT](LICENSE-MIT) or
-  [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
+  [http://opensource.org/licenses/MIT])

 at your option.

 ## Code of Conduct

-Contribution to the actix-web crate is organized under the terms of the Contributor Covenant, the
-maintainers of Actix web, promises to intervene to uphold that code of conduct.
+Contribution to the actix-web repo is organized under the terms of the Contributor Covenant.
+The Actix team promises to intervene to uphold that code of conduct.
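
The feature list above mentions the async HTTP client; below is a hedged sketch (not from the README) that calls the server example with awc 3.0.0-beta.2, using `Client::builder()` (renamed from `Client::build`, commit 121075c1ef). The URL is a placeholder.

```rust
use awc::Client;

#[actix_rt::main]
async fn main() {
    // Build a client; `Client::builder()` replaced `Client::build()` in awc 3.0.
    let client = Client::builder().finish();

    match client
        .get("http://127.0.0.1:8080/1/alice/index.html")
        .send()
        .await
    {
        Ok(mut res) => {
            println!("status: {}", res.status());
            // Read the (size-limited) response body as bytes.
            match res.body().await {
                Ok(body) => println!("body: {:?}", body),
                Err(err) => eprintln!("failed to read body: {}", err),
            }
        }
        Err(err) => eprintln!("request failed: {}", err),
    }
}
```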

View File

@@ -1,11 +0,0 @@
# Cors Middleware for actix web framework [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-cors)](https://crates.io/crates/actix-cors) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
**This crate moved to https://github.com/actix/actix-extras.**
## Documentation & community resources
* [User Guide](https://actix.rs/docs/)
* [API Documentation](https://docs.rs/actix-cors/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-cors](https://crates.io/crates/actix-cors)
* Minimum supported Rust version: 1.34 or later

View File

@@ -1,12 +1,49 @@
 # Changes

-## [Unreleased] - 2020-xx-xx
+## Unreleased - 2021-xx-xx

-## [0.3.0-beta.1] - 2020-07-15
+## 0.6.0-beta.2 - 2021-02-10
+* Fix If-Modified-Since and If-Unmodified-Since to not compare using sub-second timestamps. [#1887]
+* Replace `v_htmlescape` with `askama_escape`. [#1953]
+
+[#1887]: https://github.com/actix/actix-web/pull/1887
+[#1953]: https://github.com/actix/actix-web/pull/1953
+
+## 0.6.0-beta.1 - 2021-01-07
+* `HttpRange::parse` now has its own error type.
+* Update `bytes` to `1.0`. [#1813]
+
+[#1813]: https://github.com/actix/actix-web/pull/1813
+
+## 0.5.0 - 2020-12-26
+* Optionally support hidden files/directories. [#1811]
+
+[#1811]: https://github.com/actix/actix-web/pull/1811
+
+## 0.4.1 - 2020-11-24
+* Clarify order of parameters in `Files::new` and improve docs.
+
+## 0.4.0 - 2020-10-06
+* Add `Files::prefer_utf8` option that adds UTF-8 charset on certain response types. [#1714]
+
+[#1714]: https://github.com/actix/actix-web/pull/1714
+
+## 0.3.0 - 2020-09-11
+* No significant changes from 0.3.0-beta.1.
+
+## 0.3.0-beta.1 - 2020-07-15
 * Update `v_htmlescape` to 0.10
 * Update `actix-web` and `actix-http` dependencies to beta.1

-## [0.3.0-alpha.1] - 2020-05-23
+## 0.3.0-alpha.1 - 2020-05-23
 * Update `actix-web` and `actix-http` dependencies to alpha
 * Fix some typos in the docs
 * Bump minimum supported Rust version to 1.40
@@ -14,77 +51,73 @@
 [#1384]: https://github.com/actix/actix-web/pull/1384

-## [0.2.1] - 2019-12-22
+## 0.2.1 - 2019-12-22
 * Use the same format for file URLs regardless of platforms

-## [0.2.0] - 2019-12-20
+## 0.2.0 - 2019-12-20
 * Fix BodyEncoding trait import #1220

-## [0.2.0-alpha.1] - 2019-12-07
+## 0.2.0-alpha.1 - 2019-12-07
 * Migrate to `std::future`

-## [0.1.7] - 2019-11-06
-* Add an additional `filename*` param in the `Content-Disposition` header of `actix_files::NamedFile` to be more compatible. (#1151)
+## 0.1.7 - 2019-11-06
+* Add an additional `filename*` param in the `Content-Disposition` header of
+  `actix_files::NamedFile` to be more compatible. (#1151)

-## [0.1.6] - 2019-10-14
+## 0.1.6 - 2019-10-14
 * Add option to redirect to a slash-ended path `Files` #1132

-## [0.1.5] - 2019-10-08
+## 0.1.5 - 2019-10-08
 * Bump up `mime_guess` crate version to 2.0.1
 * Bump up `percent-encoding` crate version to 2.1
 * Allow user defined request guards for `Files` #1113

-## [0.1.4] - 2019-07-20
+## 0.1.4 - 2019-07-20
 * Allow to disable `Content-Disposition` header #686

-## [0.1.3] - 2019-06-28
+## 0.1.3 - 2019-06-28
 * Do not set `Content-Length` header, let actix-http set it #930

-## [0.1.2] - 2019-06-13
+## 0.1.2 - 2019-06-13
 * Content-Length is 0 for NamedFile HEAD request #914
 * Fix ring dependency from actix-web default features for #741

-## [0.1.1] - 2019-06-01
+## 0.1.1 - 2019-06-01
 * Static files are incorrectly served as both chunked and with length #812

-## [0.1.0] - 2019-05-25
-* NamedFile last-modified check always fails due to nano-seconds
-in file modified date #820
+## 0.1.0 - 2019-05-25
+* NamedFile last-modified check always fails due to nano-seconds in file modified date #820

-## [0.1.0-beta.4] - 2019-05-12
+## 0.1.0-beta.4 - 2019-05-12
 * Update actix-web to beta.4

-## [0.1.0-beta.1] - 2019-04-20
+## 0.1.0-beta.1 - 2019-04-20
 * Update actix-web to beta.1

-## [0.1.0-alpha.6] - 2019-04-14
+## 0.1.0-alpha.6 - 2019-04-14
 * Update actix-web to alpha6

-## [0.1.0-alpha.4] - 2019-04-08
+## 0.1.0-alpha.4 - 2019-04-08
 * Update actix-web to alpha4

-## [0.1.0-alpha.2] - 2019-04-02
+## 0.1.0-alpha.2 - 2019-04-02
 * Add default handler support

-## [0.1.0-alpha.1] - 2019-03-28
+## 0.1.0-alpha.1 - 2019-03-28
 * Initial impl

View File

@ -1,8 +1,8 @@
[package]
name = "actix-files"
version = "0.6.0-beta.2"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Static file serving for Actix Web"
readme = "README.md"
keywords = ["actix", "http", "async", "futures"]
homepage = "https://actix.rs"
@ -17,20 +17,20 @@ name = "actix_files"
path = "src/lib.rs"

[dependencies]
actix-web = { version = "4.0.0-beta.2", default-features = false }
actix-service = "2.0.0-beta.4"

askama_escape = "0.10"
bitflags = "1"
bytes = "1"
futures-core = { version = "0.3.7", default-features = false }
futures-util = { version = "0.3.7", default-features = false }
derive_more = "0.99.5"
log = "0.4"
mime = "0.3"
mime_guess = "2.0.1"
percent-encoding = "2.1"

[dev-dependencies]
actix-rt = "2"
actix-web = "4.0.0-beta.2"

View File

@ -1,9 +1,19 @@
# actix-files

> Static file serving for Actix Web

[![crates.io](https://img.shields.io/crates/v/actix-files?label=latest)](https://crates.io/crates/actix-files)
[![Documentation](https://docs.rs/actix-files/badge.svg?version=0.5.0)](https://docs.rs/actix-files/0.5.0)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
![License](https://img.shields.io/crates/l/actix-files.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-files/0.5.0/status.svg)](https://deps.rs/crate/actix-files/0.5.0)
[![Download](https://img.shields.io/crates/d/actix-files.svg)](https://crates.io/crates/actix-files)
[![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
## Documentation & Resources
- [API Documentation](https://docs.rs/actix-files/)
- [Example Project](https://github.com/actix/examples/tree/master/static_index)
- [Chat on Gitter](https://gitter.im/actix/actix-web)
- Minimum supported Rust version: 1.46 or later
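
A minimal usage sketch, mirroring the doc example that appears in `files.rs` further down in this diff (the `./static` directory name is only illustrative):

```rust
use actix_files::Files;
use actix_web::App;

// Serve the contents of the local `./static` directory under the `/static` URL prefix.
let app = App::new().service(Files::new("/static", "./static"));
```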

104
actix-files/src/chunked.rs Normal file
View File

@ -0,0 +1,104 @@
use std::{
cmp, fmt,
fs::File,
future::Future,
io::{self, Read, Seek},
pin::Pin,
task::{Context, Poll},
};
use actix_web::{
error::{BlockingError, Error},
rt::task::{spawn_blocking, JoinHandle},
};
use bytes::Bytes;
use futures_core::{ready, Stream};
#[doc(hidden)]
/// A helper created from a `std::fs::File` which reads the file
/// chunk-by-chunk on a `ThreadPool`.
pub struct ChunkedReadFile {
size: u64,
offset: u64,
state: ChunkedReadFileState,
counter: u64,
}
enum ChunkedReadFileState {
File(Option<File>),
Future(JoinHandle<Result<(File, Bytes), io::Error>>),
}
impl ChunkedReadFile {
pub(crate) fn new(size: u64, offset: u64, file: File) -> Self {
Self {
size,
offset,
state: ChunkedReadFileState::File(Some(file)),
counter: 0,
}
}
}
impl fmt::Debug for ChunkedReadFile {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("ChunkedReadFile")
}
}
impl Stream for ChunkedReadFile {
type Item = Result<Bytes, Error>;
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
let this = self.as_mut().get_mut();
match this.state {
ChunkedReadFileState::File(ref mut file) => {
let size = this.size;
let offset = this.offset;
let counter = this.counter;
if size == counter {
Poll::Ready(None)
} else {
let mut file = file
.take()
.expect("ChunkedReadFile polled after completion");
let fut = spawn_blocking(move || {
let max_bytes =
cmp::min(size.saturating_sub(counter), 65_536) as usize;
let mut buf = Vec::with_capacity(max_bytes);
file.seek(io::SeekFrom::Start(offset))?;
let n_bytes = file
.by_ref()
.take(max_bytes as u64)
.read_to_end(&mut buf)?;
if n_bytes == 0 {
return Err(io::ErrorKind::UnexpectedEof.into());
}
Ok((file, Bytes::from(buf)))
});
this.state = ChunkedReadFileState::Future(fut);
self.poll_next(cx)
}
}
ChunkedReadFileState::Future(ref mut fut) => {
let (file, bytes) =
ready!(Pin::new(fut).poll(cx)).map_err(|_| BlockingError)??;
this.state = ChunkedReadFileState::File(Some(file));
this.offset += bytes.len() as u64;
this.counter += bytes.len() as u64;
Poll::Ready(Some(Ok(bytes)))
}
}
}
}

View File

@ -0,0 +1,114 @@
use std::{fmt::Write, fs::DirEntry, io, path::Path, path::PathBuf};
use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse};
use askama_escape::{escape as escape_html_entity, Html};
use percent_encoding::{utf8_percent_encode, CONTROLS};
/// A directory; responds with the generated directory listing.
#[derive(Debug)]
pub struct Directory {
/// Base directory.
pub base: PathBuf,
/// Path of subdirectory to generate listing for.
pub path: PathBuf,
}
impl Directory {
/// Create a new directory
pub fn new(base: PathBuf, path: PathBuf) -> Directory {
Directory { base, path }
}
/// Is this entry visible from this directory?
pub fn is_visible(&self, entry: &io::Result<DirEntry>) -> bool {
if let Ok(ref entry) = *entry {
if let Some(name) = entry.file_name().to_str() {
if name.starts_with('.') {
return false;
}
}
if let Ok(ref md) = entry.metadata() {
let ft = md.file_type();
return ft.is_dir() || ft.is_file() || ft.is_symlink();
}
}
false
}
}
pub(crate) type DirectoryRenderer =
dyn Fn(&Directory, &HttpRequest) -> Result<ServiceResponse, io::Error>;
// show file url as relative to static path
macro_rules! encode_file_url {
($path:ident) => {
utf8_percent_encode(&$path, CONTROLS)
};
}
// " -- &quot; & -- &amp; ' -- &#x27; < -- &lt; > -- &gt; / -- &#x2f;
macro_rules! encode_file_name {
($entry:ident) => {
escape_html_entity(&$entry.file_name().to_string_lossy(), Html)
};
}
pub(crate) fn directory_listing(
dir: &Directory,
req: &HttpRequest,
) -> Result<ServiceResponse, io::Error> {
let index_of = format!("Index of {}", req.path());
let mut body = String::new();
let base = Path::new(req.path());
for entry in dir.path.read_dir()? {
if dir.is_visible(&entry) {
let entry = entry.unwrap();
let p = match entry.path().strip_prefix(&dir.path) {
Ok(p) if cfg!(windows) => {
base.join(p).to_string_lossy().replace("\\", "/")
}
Ok(p) => base.join(p).to_string_lossy().into_owned(),
Err(_) => continue,
};
// if file is a directory, add '/' to the end of the name
if let Ok(metadata) = entry.metadata() {
if metadata.is_dir() {
let _ = write!(
body,
"<li><a href=\"{}\">{}/</a></li>",
encode_file_url!(p),
encode_file_name!(entry),
);
} else {
let _ = write!(
body,
"<li><a href=\"{}\">{}</a></li>",
encode_file_url!(p),
encode_file_name!(entry),
);
}
} else {
continue;
}
}
}
let html = format!(
"<html>\
<head><title>{}</title></head>\
<body><h1>{}</h1>\
<ul>\
{}\
</ul></body>\n</html>",
index_of, index_of, body
);
Ok(ServiceResponse::new(
req.clone(),
HttpResponse::Ok()
.content_type("text/html; charset=utf-8")
.body(html),
))
}
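
The default renderer above emits an HTML `<ul>` listing. `Files::files_listing_renderer` (defined later in `files.rs`) accepts any function with the same `Fn(&Directory, &HttpRequest) -> Result<ServiceResponse, io::Error>` shape, so a custom listing is just another function. The following plain-text renderer is a hypothetical sketch, not part of this diff, and assumes the `Directory` type is re-exported at the crate root:

```rust
use std::{fmt::Write, io};

use actix_files::Directory;
use actix_web::{dev::ServiceResponse, HttpRequest, HttpResponse};

// Hypothetical alternative renderer: one visible entry name per line, no HTML.
fn plain_text_listing(dir: &Directory, req: &HttpRequest) -> Result<ServiceResponse, io::Error> {
    let mut body = String::new();

    for entry in dir.path.read_dir()? {
        if dir.is_visible(&entry) {
            let entry = entry.unwrap();
            let _ = writeln!(body, "{}", entry.file_name().to_string_lossy());
        }
    }

    Ok(ServiceResponse::new(
        req.clone(),
        HttpResponse::Ok()
            .content_type("text/plain; charset=utf-8")
            .body(body),
    ))
}
```

It would be wired up with `Files::new("/static", ".").show_files_listing().files_listing_renderer(plain_text_listing)`.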

View File

@ -0,0 +1,52 @@
use mime::Mime;
/// Transforms MIME `text/*` types into their UTF-8 equivalent, if supported.
///
/// MIME types that are converted
/// - application/javascript
/// - text/html
/// - text/css
/// - text/plain
/// - text/csv
/// - text/tab-separated-values
pub(crate) fn equiv_utf8_text(ct: Mime) -> Mime {
// use (roughly) order of file-type popularity for a web server
if ct == mime::APPLICATION_JAVASCRIPT {
return mime::APPLICATION_JAVASCRIPT_UTF_8;
}
if ct == mime::TEXT_HTML {
return mime::TEXT_HTML_UTF_8;
}
if ct == mime::TEXT_CSS {
return mime::TEXT_CSS_UTF_8;
}
if ct == mime::TEXT_PLAIN {
return mime::TEXT_PLAIN_UTF_8;
}
if ct == mime::TEXT_CSV {
return mime::TEXT_CSV_UTF_8;
}
if ct == mime::TEXT_TAB_SEPARATED_VALUES {
return mime::TEXT_TAB_SEPARATED_VALUES_UTF_8;
}
ct
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_equiv_utf8_text() {
assert_eq!(equiv_utf8_text(mime::TEXT_PLAIN), mime::TEXT_PLAIN_UTF_8);
assert_eq!(equiv_utf8_text(mime::TEXT_XML), mime::TEXT_XML);
assert_eq!(equiv_utf8_text(mime::IMAGE_PNG), mime::IMAGE_PNG);
}
}

282
actix-files/src/files.rs Normal file
View File

@ -0,0 +1,282 @@
use std::{cell::RefCell, fmt, io, path::PathBuf, rc::Rc};
use actix_service::{boxed, IntoServiceFactory, ServiceFactory, ServiceFactoryExt};
use actix_web::{
dev::{
AppService, HttpServiceFactory, ResourceDef, ServiceRequest, ServiceResponse,
},
error::Error,
guard::Guard,
http::header::DispositionType,
HttpRequest,
};
use futures_util::future::{ok, FutureExt, LocalBoxFuture};
use crate::{
directory_listing, named, Directory, DirectoryRenderer, FilesService,
HttpNewService, MimeOverride,
};
/// Static files handling service.
///
/// `Files` service must be registered with `App::service()` method.
///
/// ```rust
/// use actix_web::App;
/// use actix_files::Files;
///
/// let app = App::new()
/// .service(Files::new("/static", "."));
/// ```
pub struct Files {
path: String,
directory: PathBuf,
index: Option<String>,
show_index: bool,
redirect_to_slash: bool,
default: Rc<RefCell<Option<Rc<HttpNewService>>>>,
renderer: Rc<DirectoryRenderer>,
mime_override: Option<Rc<MimeOverride>>,
file_flags: named::Flags,
guards: Option<Rc<dyn Guard>>,
hidden_files: bool,
}
impl fmt::Debug for Files {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("Files")
}
}
impl Clone for Files {
fn clone(&self) -> Self {
Self {
directory: self.directory.clone(),
index: self.index.clone(),
show_index: self.show_index,
redirect_to_slash: self.redirect_to_slash,
default: self.default.clone(),
renderer: self.renderer.clone(),
file_flags: self.file_flags,
path: self.path.clone(),
mime_override: self.mime_override.clone(),
guards: self.guards.clone(),
hidden_files: self.hidden_files,
}
}
}
impl Files {
/// Create new `Files` instance for a specified base directory.
///
/// # Argument Order
/// The first argument (`mount_path`) is the root URL at which the static files are served.
/// For example, `/assets` will serve files at `example.com/assets/...`.
///
/// The second argument (`serve_from`) is the location on disk at which files are loaded.
/// This can be a relative path. For example, `./` would serve files from the current
/// working directory.
///
/// # Implementation Notes
/// If the mount path is set as the root path `/`, services registered after this one will
/// be inaccessible. Register more specific handlers and services first.
///
/// `Files` uses a threadpool for blocking filesystem operations. By default, the pool uses a
/// max number of threads equal to `512 * HttpServer::worker`. The real-time thread count is
/// adjusted with the workload: more threads are spawned when needed, and threads that stay
/// idle for a period of time are de-spawned.
pub fn new<T: Into<PathBuf>>(mount_path: &str, serve_from: T) -> Files {
let orig_dir = serve_from.into();
let dir = match orig_dir.canonicalize() {
Ok(canon_dir) => canon_dir,
Err(_) => {
log::error!("Specified path is not a directory: {:?}", orig_dir);
PathBuf::new()
}
};
Files {
path: mount_path.to_owned(),
directory: dir,
index: None,
show_index: false,
redirect_to_slash: false,
default: Rc::new(RefCell::new(None)),
renderer: Rc::new(directory_listing),
mime_override: None,
file_flags: named::Flags::default(),
guards: None,
hidden_files: false,
}
}
/// Show files listing for directories.
///
/// By default show files listing is disabled.
pub fn show_files_listing(mut self) -> Self {
self.show_index = true;
self
}
/// Redirects to a slash-ended path when browsing a directory.
///
/// By default never redirect.
pub fn redirect_to_slash_directory(mut self) -> Self {
self.redirect_to_slash = true;
self
}
/// Set custom directory renderer
pub fn files_listing_renderer<F>(mut self, f: F) -> Self
where
for<'r, 's> F: Fn(&'r Directory, &'s HttpRequest) -> Result<ServiceResponse, io::Error>
+ 'static,
{
self.renderer = Rc::new(f);
self
}
/// Specifies mime override callback
pub fn mime_override<F>(mut self, f: F) -> Self
where
F: Fn(&mime::Name<'_>) -> DispositionType + 'static,
{
self.mime_override = Some(Rc::new(f));
self
}
/// Set index file
///
/// Shows specific index file for directory "/" instead of
/// showing files listing.
pub fn index_file<T: Into<String>>(mut self, index: T) -> Self {
self.index = Some(index.into());
self
}
/// Specifies whether to use ETag or not.
///
/// Default is true.
#[inline]
pub fn use_etag(mut self, value: bool) -> Self {
self.file_flags.set(named::Flags::ETAG, value);
self
}
/// Specifies whether to use Last-Modified or not.
///
/// Default is true.
#[inline]
pub fn use_last_modified(mut self, value: bool) -> Self {
self.file_flags.set(named::Flags::LAST_MD, value);
self
}
/// Specifies whether text responses should signal a UTF-8 encoding.
///
/// Default is false (but will default to true in a future version).
#[inline]
pub fn prefer_utf8(mut self, value: bool) -> Self {
self.file_flags.set(named::Flags::PREFER_UTF8, value);
self
}
/// Specifies custom guards to use for directory listings and files.
///
/// Default behaviour allows GET and HEAD.
#[inline]
pub fn use_guards<G: Guard + 'static>(mut self, guards: G) -> Self {
self.guards = Some(Rc::new(guards));
self
}
/// Disable `Content-Disposition` header.
///
/// By default the `Content-Disposition` header is enabled.
#[inline]
pub fn disable_content_disposition(mut self) -> Self {
self.file_flags.remove(named::Flags::CONTENT_DISPOSITION);
self
}
/// Sets default handler which is used when no matched file could be found.
pub fn default_handler<F, U>(mut self, f: F) -> Self
where
F: IntoServiceFactory<U, ServiceRequest>,
U: ServiceFactory<
ServiceRequest,
Config = (),
Response = ServiceResponse,
Error = Error,
> + 'static,
{
// create and configure default resource
self.default = Rc::new(RefCell::new(Some(Rc::new(boxed::factory(
f.into_factory().map_init_err(|_| ()),
)))));
self
}
/// Enables serving hidden files and directories, allowing leading dots in URL path segments.
#[inline]
pub fn use_hidden_files(mut self) -> Self {
self.hidden_files = true;
self
}
}
impl HttpServiceFactory for Files {
fn register(self, config: &mut AppService) {
if self.default.borrow().is_none() {
*self.default.borrow_mut() = Some(config.default_service());
}
let rdef = if config.is_root() {
ResourceDef::root_prefix(&self.path)
} else {
ResourceDef::prefix(&self.path)
};
config.register_service(rdef, None, self, None)
}
}
impl ServiceFactory<ServiceRequest> for Files {
type Response = ServiceResponse;
type Error = Error;
type Config = ();
type Service = FilesService;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
let mut srv = FilesService {
directory: self.directory.clone(),
index: self.index.clone(),
show_index: self.show_index,
redirect_to_slash: self.redirect_to_slash,
default: None,
renderer: self.renderer.clone(),
mime_override: self.mime_override.clone(),
file_flags: self.file_flags,
guards: self.guards.clone(),
hidden_files: self.hidden_files,
};
if let Some(ref default) = *self.default.borrow() {
default
.new_service(())
.map(move |result| match result {
Ok(default) => {
srv.default = Some(default);
Ok(srv)
}
Err(_) => Err(()),
})
.boxed_local()
} else {
ok(srv).boxed_local()
}
}
}
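
Taken together, the builder methods above compose as in the following sketch; this is a hypothetical app (the `./assets` path, port, and chosen options are illustrative, and it assumes actix-web's default `macros` feature for `#[actix_web::main]`):

```rust
use actix_files::Files;
use actix_web::{App, HttpServer};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().service(
            Files::new("/assets", "./assets")
                // serve ./assets/index.html for directory requests
                .index_file("index.html")
                // append UTF-8 charset to text responses (see prefer_utf8 above)
                .prefer_utf8(true)
                // keep ETag / Last-Modified validation enabled (the defaults)
                .use_etag(true)
                .use_last_modified(true),
        )
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
```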

File diff suppressed because it is too large

View File

@ -7,33 +7,35 @@ use std::time::{SystemTime, UNIX_EPOCH};
#[cfg(unix)]
use std::os::unix::fs::MetadataExt;

use actix_web::{
    dev::{BodyEncoding, SizedStream},
    http::{
        header::{
            self, Charset, ContentDisposition, DispositionParam, DispositionType,
            ExtendedValue,
        },
        ContentEncoding, StatusCode,
    },
    HttpMessage, HttpRequest, HttpResponse, Responder,
};
use bitflags::bitflags;
use mime_guess::from_path;

use crate::ChunkedReadFile;
use crate::{encoding::equiv_utf8_text, range::HttpRange};

bitflags! {
    pub(crate) struct Flags: u8 {
        const ETAG = 0b0000_0001;
        const LAST_MD = 0b0000_0010;
        const CONTENT_DISPOSITION = 0b0000_0100;
        const PREFER_UTF8 = 0b0000_1000;
    }
}

impl Default for Flags {
    fn default() -> Self {
        Flags::from_bits_truncate(0b0000_0111)
    }
}
@ -90,12 +92,15 @@ impl NamedFile {
        };

        let ct = from_path(&path).first_or_octet_stream();

        let disposition = match ct.type_() {
            mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline,
            _ => DispositionType::Attachment,
        };

        let mut parameters =
            vec![DispositionParam::Filename(String::from(filename.as_ref()))];

        if !filename.is_ascii() {
            parameters.push(DispositionParam::FilenameExt(ExtendedValue {
                charset: Charset::Ext(String::from("UTF-8")),
@ -103,16 +108,19 @@ impl NamedFile {
                value: filename.into_owned().into_bytes(),
            }))
        }

        let cd = ContentDisposition {
            disposition,
            parameters,
        };

        (ct, cd)
    };

    let md = file.metadata()?;
    let modified = md.modified().ok();
    let encoding = None;

    Ok(NamedFile {
        path,
        file,
@ -184,7 +192,7 @@ impl NamedFile {
    /// image, and video content types, and `attachment` otherwise, and
    /// the filename is taken from the path provided in the `open` method
    /// after converting it to UTF-8 using.
    /// [`std::ffi::OsStr::to_string_lossy`]
    #[inline]
    pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self {
        self.content_disposition = cd;
@ -208,24 +216,33 @@ impl NamedFile {
        self
    }

    /// Specifies whether to use ETag or not.
    ///
    /// Default is true.
    #[inline]
    pub fn use_etag(mut self, value: bool) -> Self {
        self.flags.set(Flags::ETAG, value);
        self
    }

    /// Specifies whether to use Last-Modified or not.
    ///
    /// Default is true.
    #[inline]
    pub fn use_last_modified(mut self, value: bool) -> Self {
        self.flags.set(Flags::LAST_MD, value);
        self
    }

    /// Specifies whether text responses should signal a UTF-8 encoding.
    ///
    /// Default is false (but will default to true in a future version).
    #[inline]
    pub fn prefer_utf8(mut self, value: bool) -> Self {
        self.flags.set(Flags::PREFER_UTF8, value);
        self
    }

    pub(crate) fn etag(&self) -> Option<header::EntityTag> {
        // This etag format is similar to Apache's.
        self.modified.as_ref().map(|mtime| {
@ -243,6 +260,7 @@ impl NamedFile {
            let dur = mtime
                .duration_since(UNIX_EPOCH)
                .expect("modification time must be after epoch");

            header::EntityTag::strong(format!(
                "{:x}:{:x}:{:x}:{:x}",
                ino,
@ -257,27 +275,32 @@ impl NamedFile {
        self.modified.map(|mtime| mtime.into())
    }

    /// Creates an `HttpResponse` with file as a streaming body.
    pub fn into_response(self, req: &HttpRequest) -> HttpResponse {
        if self.status_code != StatusCode::OK {
            let mut res = HttpResponse::build(self.status_code);

            if self.flags.contains(Flags::PREFER_UTF8) {
                let ct = equiv_utf8_text(self.content_type.clone());
                res.insert_header((header::CONTENT_TYPE, ct.to_string()));
            } else {
                res.insert_header((header::CONTENT_TYPE, self.content_type.to_string()));
            }

            if self.flags.contains(Flags::CONTENT_DISPOSITION) {
                res.insert_header((
                    header::CONTENT_DISPOSITION,
                    self.content_disposition.to_string(),
                ));
            }

            if let Some(current_encoding) = self.encoding {
                res.encoding(current_encoding);
            }

            let reader = ChunkedReadFile::new(self.md.len(), 0, self.file);

            return res.streaming(reader);
        }

        let etag = if self.flags.contains(Flags::ETAG) {
@ -285,6 +308,7 @@ impl NamedFile {
        } else {
            None
        };

        let last_modified = if self.flags.contains(Flags::LAST_MD) {
            self.last_modified()
        } else {
@ -299,8 +323,9 @@ impl NamedFile {
        {
            let t1: SystemTime = m.clone().into();
            let t2: SystemTime = since.clone().into();

            match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
                (Ok(t1), Ok(t2)) => t1.as_secs() > t2.as_secs(),
                _ => false,
            }
        } else {
@ -310,15 +335,16 @@ impl NamedFile {
        // check last modified
        let not_modified = if !none_match(etag.as_ref(), req) {
            true
        } else if req.headers().contains_key(header::IF_NONE_MATCH) {
            false
        } else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) =
            (last_modified, req.get_header())
        {
            let t1: SystemTime = m.clone().into();
            let t2: SystemTime = since.clone().into();

            match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
                (Ok(t1), Ok(t2)) => t1.as_secs() <= t2.as_secs(),
                _ => false,
            }
        } else {
@ -326,38 +352,48 @@ impl NamedFile {
        };

        let mut resp = HttpResponse::build(self.status_code);

        if self.flags.contains(Flags::PREFER_UTF8) {
            let ct = equiv_utf8_text(self.content_type.clone());
            resp.insert_header((header::CONTENT_TYPE, ct.to_string()));
        } else {
            resp.insert_header((header::CONTENT_TYPE, self.content_type.to_string()));
        }

        if self.flags.contains(Flags::CONTENT_DISPOSITION) {
            resp.insert_header((
                header::CONTENT_DISPOSITION,
                self.content_disposition.to_string(),
            ));
        }

        // default compressing
        if let Some(current_encoding) = self.encoding {
            resp.encoding(current_encoding);
        }

        if let Some(lm) = last_modified {
            resp.insert_header((header::LAST_MODIFIED, lm.to_string()));
        }

        if let Some(etag) = etag {
            resp.insert_header((header::ETAG, etag.to_string()));
        }

        resp.insert_header((header::ACCEPT_RANGES, "bytes"));

        let mut length = self.md.len();
        let mut offset = 0;

        // check for range header
        if let Some(ranges) = req.headers().get(header::RANGE) {
            if let Ok(ranges_header) = ranges.to_str() {
                if let Ok(ranges) = HttpRange::parse(ranges_header, length) {
                    length = ranges[0].length;
                    offset = ranges[0].start;

                    resp.encoding(ContentEncoding::Identity);
                    resp.insert_header((
                        header::CONTENT_RANGE,
                        format!(
                            "bytes {}-{}/{}",
@ -365,35 +401,32 @@ impl NamedFile {
                            offset + length - 1,
                            self.md.len()
                        ),
                    ));
                } else {
                    resp.insert_header((
                        header::CONTENT_RANGE,
                        format!("bytes */{}", length),
                    ));
                    return resp.status(StatusCode::RANGE_NOT_SATISFIABLE).finish();
                };
            } else {
                return resp.status(StatusCode::BAD_REQUEST).finish();
            };
        };

        if precondition_failed {
            return resp.status(StatusCode::PRECONDITION_FAILED).finish();
        } else if not_modified {
            return resp.status(StatusCode::NOT_MODIFIED).finish();
        }

        let reader = ChunkedReadFile::new(length, offset, self.file);

        if offset != 0 || length != self.md.len() {
            resp.status(StatusCode::PARTIAL_CONTENT);
        }

        resp.body(SizedStream::new(length, reader))
    }
}
@ -415,6 +448,7 @@ impl DerefMut for NamedFile {
fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
    match req.get_header::<header::IfMatch>() {
        None | Some(header::IfMatch::Any) => true,

        Some(header::IfMatch::Items(ref items)) => {
            if let Some(some_etag) = etag {
                for item in items {
@ -423,6 +457,7 @@ fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
                    }
                }
            }

            false
        }
    }
@ -432,6 +467,7 @@ fn any_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
    match req.get_header::<header::IfNoneMatch>() {
        Some(header::IfNoneMatch::Any) => false,

        Some(header::IfNoneMatch::Items(ref items)) => {
            if let Some(some_etag) = etag {
                for item in items {
@ -440,17 +476,16 @@ fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
                    }
                }
            }

            true
        }

        None => true,
    }
}

impl Responder for NamedFile {
    fn respond_to(self, req: &HttpRequest) -> HttpResponse {
        self.into_response(req)
    }
}

119
actix-files/src/path_buf.rs Normal file
View File

@ -0,0 +1,119 @@
use std::{
path::{Path, PathBuf},
str::FromStr,
};
use actix_web::{dev::Payload, FromRequest, HttpRequest};
use futures_util::future::{ready, Ready};
use crate::error::UriSegmentError;
#[derive(Debug)]
pub(crate) struct PathBufWrap(PathBuf);
impl FromStr for PathBufWrap {
type Err = UriSegmentError;
fn from_str(path: &str) -> Result<Self, Self::Err> {
Self::parse_path(path, false)
}
}
impl PathBufWrap {
/// Parse a path, giving the choice of allowing hidden files to be considered valid segments.
pub fn parse_path(path: &str, hidden_files: bool) -> Result<Self, UriSegmentError> {
let mut buf = PathBuf::new();
for segment in path.split('/') {
if segment == ".." {
buf.pop();
} else if !hidden_files && segment.starts_with('.') {
return Err(UriSegmentError::BadStart('.'));
} else if segment.starts_with('*') {
return Err(UriSegmentError::BadStart('*'));
} else if segment.ends_with(':') {
return Err(UriSegmentError::BadEnd(':'));
} else if segment.ends_with('>') {
return Err(UriSegmentError::BadEnd('>'));
} else if segment.ends_with('<') {
return Err(UriSegmentError::BadEnd('<'));
} else if segment.is_empty() {
continue;
} else if cfg!(windows) && segment.contains('\\') {
return Err(UriSegmentError::BadChar('\\'));
} else {
buf.push(segment)
}
}
Ok(PathBufWrap(buf))
}
}
impl AsRef<Path> for PathBufWrap {
fn as_ref(&self) -> &Path {
self.0.as_ref()
}
}
impl FromRequest for PathBufWrap {
type Error = UriSegmentError;
type Future = Ready<Result<Self, Self::Error>>;
type Config = ();
fn from_request(req: &HttpRequest, _: &mut Payload) -> Self::Future {
ready(req.match_info().path().parse())
}
}
#[cfg(test)]
mod tests {
use std::iter::FromIterator;
use super::*;
#[test]
fn test_path_buf() {
assert_eq!(
PathBufWrap::from_str("/test/.tt").map(|t| t.0),
Err(UriSegmentError::BadStart('.'))
);
assert_eq!(
PathBufWrap::from_str("/test/*tt").map(|t| t.0),
Err(UriSegmentError::BadStart('*'))
);
assert_eq!(
PathBufWrap::from_str("/test/tt:").map(|t| t.0),
Err(UriSegmentError::BadEnd(':'))
);
assert_eq!(
PathBufWrap::from_str("/test/tt<").map(|t| t.0),
Err(UriSegmentError::BadEnd('<'))
);
assert_eq!(
PathBufWrap::from_str("/test/tt>").map(|t| t.0),
Err(UriSegmentError::BadEnd('>'))
);
assert_eq!(
PathBufWrap::from_str("/seg1/seg2/").unwrap().0,
PathBuf::from_iter(vec!["seg1", "seg2"])
);
assert_eq!(
PathBufWrap::from_str("/seg1/../seg2/").unwrap().0,
PathBuf::from_iter(vec!["seg2"])
);
}
#[test]
fn test_parse_path() {
assert_eq!(
PathBufWrap::parse_path("/test/.tt", false).map(|t| t.0),
Err(UriSegmentError::BadStart('.'))
);
assert_eq!(
PathBufWrap::parse_path("/test/.tt", true).unwrap().0,
PathBuf::from_iter(vec!["test", ".tt"])
);
}
}

View File

@ -1,24 +1,33 @@
use derive_more::{Display, Error};

/// HTTP Range header representation.
#[derive(Debug, Clone, Copy)]
pub struct HttpRange {
    /// Start of range.
    pub start: u64,

    /// Length of range.
    pub length: u64,
}

const PREFIX: &str = "bytes=";
const PREFIX_LEN: usize = 6;

#[derive(Debug, Clone, Display, Error)]
#[display(fmt = "Parse HTTP Range failed")]
pub struct ParseRangeErr(#[error(not(source))] ());

impl HttpRange {
    /// Parses Range HTTP header string as per RFC 2616.
    ///
    /// `header` is HTTP Range header (e.g. `bytes=bytes=0-9`).
    /// `size` is full size of response (file).
    pub fn parse(header: &str, size: u64) -> Result<Vec<HttpRange>, ParseRangeErr> {
        if header.is_empty() {
            return Ok(Vec::new());
        }

        if !header.starts_with(PREFIX) {
            return Err(ParseRangeErr(()));
        }

        let size_sig = size as i64;
@ -31,13 +40,14 @@ impl HttpRange {
            .map(|ra| {
                let mut start_end_iter = ra.split('-');

                let start_str = start_end_iter.next().ok_or(ParseRangeErr(()))?.trim();
                let end_str = start_end_iter.next().ok_or(ParseRangeErr(()))?.trim();

                if start_str.is_empty() {
                    // If no start is specified, end specifies the
                    // range start relative to the end of the file.
                    let mut length: i64 =
                        end_str.parse().map_err(|_| ParseRangeErr(()))?;

                    if length > size_sig {
                        length = size_sig;
@ -48,10 +58,10 @@ impl HttpRange {
                        length: length as u64,
                    }))
                } else {
                    let start: i64 = start_str.parse().map_err(|_| ParseRangeErr(()))?;

                    if start < 0 {
                        return Err(ParseRangeErr(()));
                    }
                    if start >= size_sig {
                        no_overlap = true;
@ -62,10 +72,11 @@ impl HttpRange {
                        // If no end is specified, range extends to end of the file.
                        size_sig - start
                    } else {
                        let mut end: i64 =
                            end_str.parse().map_err(|_| ParseRangeErr(()))?;

                        if start > end {
                            return Err(ParseRangeErr(()));
                        }

                        if end >= size_sig {
@ -86,7 +97,7 @@ impl HttpRange {
        let ranges: Vec<HttpRange> = all_ranges.into_iter().filter_map(|x| x).collect();

        if no_overlap && ranges.is_empty() {
            return Err(ParseRangeErr(()));
        }

        Ok(ranges)
@ -330,8 +341,7 @@ mod tests {
            if expected.is_empty() {
                continue;
            } else {
                panic!(
                    "parse({}, {}) returned error {:?}",
                    header,
                    size,
@ -343,28 +353,24 @@ mod tests {
            let got = res.unwrap();

            if got.len() != expected.len() {
                panic!(
                    "len(parseRange({}, {})) = {}, want {}",
                    header,
                    size,
                    got.len(),
                    expected.len()
                );
            }

            for i in 0..expected.len() {
                if got[i].start != expected[i].start {
                    panic!(
                        "parseRange({}, {})[{}].start = {}, want {}",
                        header, size, i, got[i].start, expected[i].start
                    )
                }
                if got[i].length != expected[i].length {
                    panic!(
                        "parseRange({}, {})[{}].length = {}, want {}",
                        header, size, i, got[i].length, expected[i].length
                    )
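
As a quick illustration of the reworked API above, a hedged doctest-style sketch; the values follow from the parsing logic shown in this file:

```rust
use actix_files::HttpRange;

// "bytes=0-9" against a 100-byte resource: one range starting at 0 with length 10.
let ranges = HttpRange::parse("bytes=0-9", 100).unwrap();
assert_eq!(ranges[0].start, 0);
assert_eq!(ranges[0].length, 10);

// Malformed input now surfaces the dedicated `ParseRangeErr` error instead of `()`.
assert!(HttpRange::parse("bytes=abc", 100).is_err());
```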

155
actix-files/src/service.rs Normal file
View File

@ -0,0 +1,155 @@
use std::{fmt, io, path::PathBuf, rc::Rc, task::Poll};
use actix_service::Service;
use actix_web::{
dev::{ServiceRequest, ServiceResponse},
error::Error,
guard::Guard,
http::{header, Method},
HttpResponse,
};
use futures_util::future::{ok, Either, LocalBoxFuture, Ready};
use crate::{
named, Directory, DirectoryRenderer, FilesError, HttpService, MimeOverride,
NamedFile, PathBufWrap,
};
/// Assembled file serving service.
pub struct FilesService {
pub(crate) directory: PathBuf,
pub(crate) index: Option<String>,
pub(crate) show_index: bool,
pub(crate) redirect_to_slash: bool,
pub(crate) default: Option<HttpService>,
pub(crate) renderer: Rc<DirectoryRenderer>,
pub(crate) mime_override: Option<Rc<MimeOverride>>,
pub(crate) file_flags: named::Flags,
pub(crate) guards: Option<Rc<dyn Guard>>,
pub(crate) hidden_files: bool,
}
type FilesServiceFuture = Either<
Ready<Result<ServiceResponse, Error>>,
LocalBoxFuture<'static, Result<ServiceResponse, Error>>,
>;
impl FilesService {
fn handle_err(&self, e: io::Error, req: ServiceRequest) -> FilesServiceFuture {
log::debug!("Failed to handle {}: {}", req.path(), e);
if let Some(ref default) = self.default {
Either::Right(default.call(req))
} else {
Either::Left(ok(req.error_response(e)))
}
}
}
impl fmt::Debug for FilesService {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str("FilesService")
}
}
impl Service<ServiceRequest> for FilesService {
type Response = ServiceResponse;
type Error = Error;
type Future = FilesServiceFuture;
actix_service::always_ready!();
fn call(&self, req: ServiceRequest) -> Self::Future {
let is_method_valid = if let Some(guard) = &self.guards {
// execute user defined guards
(**guard).check(req.head())
} else {
// default behavior
matches!(*req.method(), Method::HEAD | Method::GET)
};
if !is_method_valid {
return Either::Left(ok(req.into_response(
actix_web::HttpResponse::MethodNotAllowed()
.insert_header(header::ContentType(mime::TEXT_PLAIN_UTF_8))
.body("Request did not meet this resource's requirements."),
)));
}
let real_path =
match PathBufWrap::parse_path(req.match_info().path(), self.hidden_files) {
Ok(item) => item,
Err(e) => return Either::Left(ok(req.error_response(e))),
};
// full file path
let path = match self.directory.join(&real_path).canonicalize() {
Ok(path) => path,
Err(e) => return self.handle_err(e, req),
};
if path.is_dir() {
if let Some(ref redir_index) = self.index {
if self.redirect_to_slash && !req.path().ends_with('/') {
let redirect_to = format!("{}/", req.path());
return Either::Left(ok(req.into_response(
HttpResponse::Found()
.insert_header((header::LOCATION, redirect_to))
.body("")
.into_body(),
)));
}
let path = path.join(redir_index);
match NamedFile::open(path) {
Ok(mut named_file) => {
if let Some(ref mime_override) = self.mime_override {
let new_disposition =
mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition;
}
named_file.flags = self.file_flags;
let (req, _) = req.into_parts();
let res = named_file.into_response(&req);
Either::Left(ok(ServiceResponse::new(req, res)))
}
Err(e) => self.handle_err(e, req),
}
} else if self.show_index {
let dir = Directory::new(self.directory.clone(), path);
let (req, _) = req.into_parts();
let x = (self.renderer)(&dir, &req);
match x {
Ok(resp) => Either::Left(ok(resp)),
Err(e) => Either::Left(ok(ServiceResponse::from_err(e, req))),
}
} else {
Either::Left(ok(ServiceResponse::from_err(
FilesError::IsDirectory,
req.into_parts().0,
)))
}
} else {
match NamedFile::open(path) {
Ok(mut named_file) => {
if let Some(ref mime_override) = self.mime_override {
let new_disposition =
mime_override(&named_file.content_type.type_());
named_file.content_disposition.disposition = new_disposition;
}
named_file.flags = self.file_flags;
let (req, _) = req.into_parts();
let res = named_file.into_response(&req);
Either::Left(ok(ServiceResponse::new(req, res)))
}
Err(e) => self.handle_err(e, req),
}
}
}
}
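
The guard check at the top of `call` replaces the default GET/HEAD method filter entirely, so any `Guard` can gate the whole service. A hypothetical sketch using one of actix-web's built-in guards:

```rust
use actix_files::Files;
use actix_web::guard;

// Only serve files for requests carrying this Host header; note that setting a
// guard replaces the default GET/HEAD method check performed when none is set.
let files = Files::new("/static", ".")
    .use_guards(guard::Header("host", "static.example.com"));
```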

View File

@ -0,0 +1,39 @@
use actix_files::Files;
use actix_web::{
http::{
header::{self, HeaderValue},
StatusCode,
},
test::{self, TestRequest},
App,
};
#[actix_rt::test]
async fn test_utf8_file_contents() {
// use default ISO-8859-1 encoding
let srv = test::init_service(App::new().service(Files::new("/", "./tests"))).await;
let req = TestRequest::with_uri("/utf8.txt").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(
res.headers().get(header::CONTENT_TYPE),
Some(&HeaderValue::from_static("text/plain")),
);
// prefer UTF-8 encoding
let srv = test::init_service(
App::new().service(Files::new("/", "./tests").prefer_utf8(true)),
)
.await;
let req = TestRequest::with_uri("/utf8.txt").to_request();
let res = test::call_service(&srv, req).await;
assert_eq!(res.status(), StatusCode::OK);
assert_eq!(
res.headers().get(header::CONTENT_TYPE),
Some(&HeaderValue::from_static("text/plain; charset=utf-8")),
);
}

View File

@ -0,0 +1,3 @@
中文内容显示正确。
English is OK.

View File

@ -1,3 +0,0 @@
# Framed app for actix web
**This crate has been deprecated and removed.**

View File

@ -1,7 +1,32 @@
# Changes

## Unreleased - 2021-xx-xx

## 3.0.0-beta.2 - 2021-02-10
* No notable changes.

## 3.0.0-beta.1 - 2021-01-07
* Update `bytes` to `1.0`. [#1813]

[#1813]: https://github.com/actix/actix-web/pull/1813

## 2.1.0 - 2020-11-25
* Add ability to set address for `TestServer`. [#1645]
* Upgrade `base64` to `0.13`.
* Upgrade `serde_urlencoded` to `0.7`. [#1773]

[#1773]: https://github.com/actix/actix-web/pull/1773
[#1645]: https://github.com/actix/actix-web/pull/1645

## 2.0.0 - 2020-09-11
* Update actix-codec and actix-utils dependencies.

## 2.0.0-alpha.1 - 2020-05-23
* Update the `time` dependency to 0.2.7
* Update `actix-connect` dependency to 2.0.0-alpha.2
* Make `test_server` `async` fn.
@ -10,74 +35,56 @@
* Update `base64` dependency to 0.12
* Update `env_logger` dependency to 0.7

## 1.0.0 - 2019-12-13
* Replaced `TestServer::start()` with `test_server()`

## 1.0.0-alpha.3 - 2019-12-07
* Migrate to `std::future`

## 0.2.5 - 2019-09-17
* Update serde_urlencoded to "0.6.1"
* Increase TestServerRuntime timeouts from 500ms to 3000ms
* Do not override current `System`

## 0.2.4 - 2019-07-18
* Update actix-server to 0.6

## 0.2.3 - 2019-07-16
* Add `delete`, `options`, `patch` methods to `TestServerRunner`

## 0.2.2 - 2019-06-16
* Add .put() and .sput() methods

## 0.2.1 - 2019-06-05
* Add license files

## 0.2.0 - 2019-05-12
* Update awc and actix-http deps

## 0.1.1 - 2019-04-24
* Always make new connection for http client

## 0.1.0 - 2019-04-16
* No changes

## 0.1.0-alpha.3 - 2019-04-02
* Request functions accept path #743

## 0.1.0-alpha.2 - 2019-03-29
* Added TestServerRuntime::load_body() method
* Update actix-http and awc libraries

## 0.1.0-alpha.1 - 2019-03-28
* Initial impl

View File

@ -1,8 +1,8 @@
[package]
name = "actix-http-test"
version = "3.0.0-beta.2"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Various helpers for Actix applications to use during testing"
readme = "README.md"
keywords = ["http", "web", "framework", "async", "futures"]
homepage = "https://actix.rs"
@ -26,31 +26,30 @@ path = "src/lib.rs"
default = []

# openssl
openssl = ["tls-openssl", "awc/openssl"]

[dependencies]
actix-service = "2.0.0-beta.4"
actix-codec = "0.4.0-beta.1"
actix-tls = "3.0.0-beta.3"
actix-utils = "3.0.0-beta.2"
actix-rt = "2"
actix-server = "2.0.0-beta.3"
awc = "3.0.0-beta.2"

base64 = "0.13"
bytes = "1"
futures-core = { version = "0.3.7", default-features = false }
http = "0.2.2"
log = "0.4"
socket2 = "0.3"
serde = "1.0"
serde_json = "1.0"
slab = "0.4"
serde_urlencoded = "0.7"
time = { version = "0.2.23", default-features = false, features = ["std"] }
tls-openssl = { version = "0.10.9", package = "openssl", optional = true }

[dev-dependencies]
actix-web = "4.0.0-beta.2"
actix-http = "3.0.0-beta.2"

15
actix-http-test/README.md Normal file
View File

@ -0,0 +1,15 @@
# actix-http-test
> Various helpers for Actix applications to use during testing.
[![crates.io](https://img.shields.io/crates/v/actix-http-test?label=latest)](https://crates.io/crates/actix-http-test)
[![Documentation](https://docs.rs/actix-http-test/badge.svg?version=2.1.0)](https://docs.rs/actix-http-test/2.1.0)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http-test)
[![Dependency Status](https://deps.rs/crate/actix-http-test/2.1.0/status.svg)](https://deps.rs/crate/actix-http-test/2.1.0)
[![Join the chat at https://gitter.im/actix/actix-web](https://badges.gitter.im/actix/actix-web.svg)](https://gitter.im/actix/actix-web?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
## Documentation & Resources
- [API Documentation](https://docs.rs/actix-http-test)
- [Chat on Gitter](https://gitter.im/actix/actix-web)
- Minimum Supported Rust Version (MSRV): 1.46.0

View File

@ -1,4 +1,12 @@
//! Various helpers for Actix applications to use during testing.

#![deny(rust_2018_idioms)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]

#[cfg(feature = "openssl")]
extern crate tls_openssl as openssl;

use std::sync::mpsc;
use std::{net, thread, time};
@ -11,8 +19,6 @@ use futures_core::stream::Stream;
use http::Method;
use socket2::{Domain, Protocol, Socket, Type};

/// Start test server
///
/// `TestServer` is very simple test server that simplify process of writing
@ -44,21 +50,32 @@ pub use actix_testing::*;
/// }
/// ```
pub async fn test_server<F: ServiceFactory<TcpStream>>(factory: F) -> TestServer {
    let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
    test_server_with_addr(tcp, factory).await
}

/// Start [`test server`](test_server()) on a concrete Address
pub async fn test_server_with_addr<F: ServiceFactory<TcpStream>>(
    tcp: net::TcpListener,
    factory: F,
) -> TestServer {
    let (tx, rx) = mpsc::channel();

    // run server in separate thread
    thread::spawn(move || {
        let sys = System::new();
        let local_addr = tcp.local_addr().unwrap();

        let srv = Server::build()
            .listen("test", tcp, factory)?
            .workers(1)
            .disable_signals();

        sys.block_on(async {
            srv.run();
            tx.send((System::current(), local_addr)).unwrap();
        });

        sys.run()
    });
@ -68,7 +85,7 @@ pub async fn test_server<F: ServiceFactory<TcpStream>>(factory: F) -> TestServer
    let connector = {
        #[cfg(feature = "openssl")]
        {
            use openssl::ssl::{SslConnector, SslMethod, SslVerifyMode};

            let mut builder = SslConnector::builder(SslMethod::tls()).unwrap();
            builder.set_verify(SslVerifyMode::NONE);
@ -90,9 +107,8 @@ pub async fn test_server<F: ServiceFactory<TcpStream>>(factory: F) -> TestServer
        }
    };

        Client::builder().connector(connector).finish()
    };

    TestServer {
        addr,

View File

@ -1,11 +1,152 @@
# Changes

## Unreleased - 2021-xx-xx

## 3.0.0-beta.2 - 2021-02-19
### Added
* `IntoHeaderPair` trait that allows using typed and untyped headers in the same methods. [#1869]
* `ResponseBuilder::insert_header` method which allows using typed headers. [#1869]
* `ResponseBuilder::append_header` method which allows using typed headers. [#1869]
* `TestRequest::insert_header` method which allows using typed headers. [#1869]
* `ContentEncoding` implements all necessary header traits. [#1912]
* `HeaderMap::len_keys` has the behavior of the old `len` method. [#1964]
* `HeaderMap::drain` as an efficient draining iterator. [#1964]
* Implement `IntoIterator` for owned `HeaderMap`. [#1964]
* `trust-dns` optional feature to enable `trust-dns-resolver` as client dns resolver. [#1969]
### Changed
* `ResponseBuilder::content_type` now takes an `impl IntoHeaderValue` to support using typed
`mime` types. [#1894]
* Renamed `IntoHeaderValue::{try_into => try_into_value}` to avoid ambiguity with std
`TryInto` trait. [#1894]
* `Extensions::insert` returns Option of replaced item. [#1904]
* Remove `HttpResponseBuilder::json2()`. [#1903]
* Enable `HttpResponseBuilder::json()` to receive data by value and reference. [#1903]
* `client::error::ConnectError` Resolver variant contains `Box<dyn std::error::Error>` type. [#1905]
* `client::ConnectorConfig` default timeout changed to 5 seconds. [#1905]
* Simplify `BlockingError` type to a unit struct. It's now only triggered when blocking thread pool
is dead. [#1957]
* `HeaderMap::len` now returns number of values instead of number of keys. [#1964]
* `HeaderMap::insert` now returns iterator of removed values. [#1964]
* `HeaderMap::remove` now returns iterator of removed values. [#1964]
### Removed
* `ResponseBuilder::set`; use `ResponseBuilder::insert_header`. [#1869]
* `ResponseBuilder::set_header`; use `ResponseBuilder::insert_header`. [#1869]
* `ResponseBuilder::header`; use `ResponseBuilder::append_header`. [#1869]
* `TestRequest::with_hdr`; use `TestRequest::default().insert_header()`. [#1869]
* `TestRequest::with_header`; use `TestRequest::default().insert_header()`. [#1869]
* `actors` optional feature. [#1969]
* `ResponseError` impl for `actix::MailboxError`. [#1969]
### Documentation
* Vastly improve docs and add examples for `HeaderMap`. [#1964]
[#1869]: https://github.com/actix/actix-web/pull/1869
[#1894]: https://github.com/actix/actix-web/pull/1894
[#1903]: https://github.com/actix/actix-web/pull/1903
[#1904]: https://github.com/actix/actix-web/pull/1904
[#1905]: https://github.com/actix/actix-web/pull/1905
[#1912]: https://github.com/actix/actix-web/pull/1912
[#1957]: https://github.com/actix/actix-web/pull/1957
[#1964]: https://github.com/actix/actix-web/pull/1964
[#1969]: https://github.com/actix/actix-web/pull/1969
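The typed/untyped header entries above can be illustrated with a minimal sketch. It is not taken
from the diff itself: it assumes the 3.0.0-beta.2 API as described in this section, that
`ContentType` implements `IntoHeaderPair`, and the `x-trace` header name and values are invented
for illustration.

```rust
use actix_http::http::header::{ContentType, HeaderValue};
use actix_http::Response;

fn typed_and_untyped_headers() {
    let res = Response::Ok()
        // typed header: name and value are derived from the header type itself
        .insert_header(ContentType::plaintext())
        // untyped header: a (name, value) tuple also satisfies `IntoHeaderPair`;
        // `append_header` keeps any existing values under the same name
        .append_header(("x-trace", HeaderValue::from_static("span-a")))
        .append_header(("x-trace", HeaderValue::from_static("span-b")))
        .finish();

    // `HeaderMap::len` now counts values; `len_keys` keeps the old key-counting behavior
    assert_eq!(res.headers().len(), 3);
    assert_eq!(res.headers().len_keys(), 2);
}
```

The removed `set`/`set_header`/`header` builder methods map onto `insert_header` and
`append_header` in the same way.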
## 3.0.0-beta.1 - 2021-01-07
### Added
* Add `Http3` to `Protocol` enum for future compatibility and also mark `#[non_exhaustive]`.
### Changed
* Update `actix-*` dependencies to tokio `1.0` based versions. [#1813]
* Bumped `rand` to `0.8`.
* Update `bytes` to `1.0`. [#1813]
* Update `h2` to `0.3`. [#1813]
* The `ws::Message::Text` enum variant now contains a `bytestring::ByteString`. [#1864]
### Removed
* Deprecated `on_connect` methods have been removed. Prefer the new
`on_connect_ext` technique. [#1857]
* Remove `ResponseError` impl for `actix::actors::resolver::ResolverError`
  due to deprecation of the resolver actor. [#1813]
* Remove `ConnectError::SslHandshakeError` and re-export of `HandshakeError`
  due to the removal of this type from the `tokio-openssl` crate. OpenSSL handshake
  errors are now returned as `ConnectError::SslError`. [#1813]
* Remove `actix-threadpool` dependency. Use `actix_rt::task::spawn_blocking` instead (see the
  sketch after this section's links). Due to this change, the `actix_threadpool::BlockingError`
  type is moved into the `actix_http::error` module. [#1878]
[#1813]: https://github.com/actix/actix-web/pull/1813
[#1857]: https://github.com/actix/actix-web/pull/1857
[#1864]: https://github.com/actix/actix-web/pull/1864
[#1878]: https://github.com/actix/actix-web/pull/1878
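For the `actix-threadpool` removal above, the drop-in replacement is `actix_rt::task::spawn_blocking`.
A minimal sketch, assuming only what the entry states; the function name and the work inside the
closure are invented for illustration.

```rust
use actix_rt::task::spawn_blocking;

async fn decode_report(raw: Vec<u8>) -> String {
    // run blocking or CPU-heavy work off the async worker threads;
    // awaiting the returned handle yields a Result (Err if the task panicked)
    spawn_blocking(move || {
        // stand-in for real blocking work (compression, hashing, file IO, ...)
        String::from_utf8_lossy(&raw).into_owned()
    })
    .await
    .expect("blocking task panicked")
}
```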
## 2.2.0 - 2020-11-25
### Added
* HttpResponse builders for 1xx status codes. [#1768]
* `Accept::mime_precedence` and `Accept::mime_preference`. [#1793]
* `TryFrom<u16>` and `TryFrom<f32>` for `http::header::Quality`. [#1797]
### Fixed
* Started dropping `transfer-encoding: chunked` and `Content-Length` for 1XX and 204 responses. [#1767]
### Changed
* Upgrade `serde_urlencoded` to `0.7`. [#1773]
[#1773]: https://github.com/actix/actix-web/pull/1773
[#1767]: https://github.com/actix/actix-web/pull/1767
[#1768]: https://github.com/actix/actix-web/pull/1768
[#1793]: https://github.com/actix/actix-web/pull/1793
[#1797]: https://github.com/actix/actix-web/pull/1797
## 2.1.0 - 2020-10-30
### Added
* Added more flexible `on_connect_ext` methods for on-connect handling. [#1754]
### Changed
* Upgrade `base64` to `0.13`. [#1744]
* Upgrade `pin-project` to `1.0`. [#1733]
* Deprecate `ResponseBuilder::{if_some, if_true}`. [#1760]
[#1760]: https://github.com/actix/actix-web/pull/1760
[#1754]: https://github.com/actix/actix-web/pull/1754
[#1733]: https://github.com/actix/actix-web/pull/1733
[#1744]: https://github.com/actix/actix-web/pull/1744
## 2.0.0 - 2020-09-11
* No significant changes from `2.0.0-beta.4`.
## 2.0.0-beta.4 - 2020-09-09
### Changed
* Update actix-codec and actix-utils dependencies.
* Update actix-connect and actix-tls dependencies.
## 2.0.0-beta.3 - 2020-08-14
### Fixed
* Memory leak of `client::pool::ConnectorPoolSupport`. [#1626]
[#1626]: https://github.com/actix/actix-web/pull/1626
## 2.0.0-beta.2 - 2020-07-21
### Fixed
* Potential UB in h1 decoder using uninitialized memory. [#1614]
### Changed
* Fix illegal chunked encoding. [#1615]
[#1614]: https://github.com/actix/actix-web/pull/1614
[#1615]: https://github.com/actix/actix-web/pull/1615
## 2.0.0-beta.1 - 2020-07-11
### Changed
* Migrate cookie handling to `cookie` crate. [#1558] * Migrate cookie handling to `cookie` crate. [#1558]
* Update `sha-1` to 0.9. [#1586] * Update `sha-1` to 0.9. [#1586]
* Fix leak in client pool. [#1580] * Fix leak in client pool. [#1580]
@ -15,33 +156,30 @@
[#1586]: https://github.com/actix/actix-web/pull/1586 [#1586]: https://github.com/actix/actix-web/pull/1586
[#1580]: https://github.com/actix/actix-web/pull/1580 [#1580]: https://github.com/actix/actix-web/pull/1580
## [2.0.0-alpha.4] - 2020-05-21
## 2.0.0-alpha.4 - 2020-05-21
### Changed ### Changed
* Bump minimum supported Rust version to 1.40 * Bump minimum supported Rust version to 1.40
* content_length function is removed, and you can set Content-Length by calling no_chunking function [#1439] * content_length function is removed, and you can set Content-Length by calling
no_chunking function; a sketch follows this section's links. [#1439]
* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a * `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
`u64` instead of a `usize`. `u64` instead of a `usize`.
* Update `base64` dependency to 0.12 * Update `base64` dependency to 0.12
### Fixed ### Fixed
* Support parsing of `SameSite=None` [#1503] * Support parsing of `SameSite=None` [#1503]
[#1439]: https://github.com/actix/actix-web/pull/1439 [#1439]: https://github.com/actix/actix-web/pull/1439
[#1503]: https://github.com/actix/actix-web/pull/1503 [#1503]: https://github.com/actix/actix-web/pull/1503
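The `no_chunking` entry above can be sketched as follows. This targets the 2.0.0-alpha.4 API and
assumes `no_chunking` takes the body length as a `u64` (implied by the entry, not shown in this
diff); the handler itself is invented for illustration.

```rust
use actix_http::Response;

fn fixed_length_response(payload: &'static str) -> Response {
    Response::Ok()
        // disables chunked transfer encoding and sets `Content-Length` to the given size
        .no_chunking(payload.len() as u64)
        .body(payload)
}
```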
## [2.0.0-alpha.3] - 2020-05-08
## 2.0.0-alpha.3 - 2020-05-08
### Fixed ### Fixed
* Correct spelling of ConnectError::Unresolved [#1487] * Correct spelling of ConnectError::Unresolved [#1487]
* Fix a mistake in the encoding of websocket continuation messages wherein * Fix a mistake in the encoding of websocket continuation messages wherein
Item::FirstText and Item::FirstBinary are each encoded as the other. Item::FirstText and Item::FirstBinary are each encoded as the other.
### Changed ### Changed
* Implement `std::error::Error` for our custom errors [#1422] * Implement `std::error::Error` for our custom errors [#1422]
* Remove `failure` support for `ResponseError` since that crate * Remove `failure` support for `ResponseError` since that crate
will be deprecated in the near future. will be deprecated in the near future.
@ -49,338 +187,247 @@
[#1422]: https://github.com/actix/actix-web/pull/1422 [#1422]: https://github.com/actix/actix-web/pull/1422
[#1487]: https://github.com/actix/actix-web/pull/1487 [#1487]: https://github.com/actix/actix-web/pull/1487
## [2.0.0-alpha.2] - 2020-03-07
## 2.0.0-alpha.2 - 2020-03-07
### Changed ### Changed
* Update `actix-connect` and `actix-tls` dependency to 2.0.0-alpha.1. [#1395] * Update `actix-connect` and `actix-tls` dependency to 2.0.0-alpha.1. [#1395]
* Change default initial window size and connection window size for HTTP2 to 2MB and 1MB
* Change default initial window size and connection window size for HTTP2 to 2MB and 1MB respectively respectively to improve download speed for awc when downloading large objects. [#1394]
to improve download speed for awc when downloading large objects. [#1394] * client::Connector accepts initial_window_size and initial_connection_window_size
HTTP2 configuration. [#1394]
* client::Connector accepts initial_window_size and initial_connection_window_size HTTP2 configuration. [#1394]
* client::Connector allowing to set max_http_version to limit HTTP version to be used. [#1394] * client::Connector allowing to set max_http_version to limit HTTP version to be used. [#1394]
[#1394]: https://github.com/actix/actix-web/pull/1394 [#1394]: https://github.com/actix/actix-web/pull/1394
[#1395]: https://github.com/actix/actix-web/pull/1395 [#1395]: https://github.com/actix/actix-web/pull/1395
## [2.0.0-alpha.1] - 2020-02-27
## 2.0.0-alpha.1 - 2020-02-27
### Changed ### Changed
* Update the `time` dependency to 0.2.7. * Update the `time` dependency to 0.2.7.
* Moved actors messages support from actix crate, enabled with feature `actors`. * Moved actors messages support from actix crate, enabled with feature `actors`.
* Breaking change: trait MessageBody requires Unpin and accepting Pin<&mut Self> instead of &mut self in the poll_next(). * Breaking change: trait MessageBody requires Unpin and accepting `Pin<&mut Self>` instead of
`&mut self` in the poll_next().
* MessageBody is not implemented for &'static [u8] anymore. * MessageBody is not implemented for &'static [u8] anymore.
### Fixed ### Fixed
* Allow `SameSite=None` cookies to be sent in a response. * Allow `SameSite=None` cookies to be sent in a response.
## [1.0.1] - 2019-12-20
## 1.0.1 - 2019-12-20
### Fixed ### Fixed
* Poll upgrade service's readiness from HTTP service handlers * Poll upgrade service's readiness from HTTP service handlers
* Replace brotli with brotli2 #1224 * Replace brotli with brotli2 #1224
## [1.0.0] - 2019-12-13
## 1.0.0 - 2019-12-13
### Added ### Added
* Add websockets continuation frame support * Add websockets continuation frame support
### Changed ### Changed
* Replace `flate2-xxx` features with `compress` * Replace `flate2-xxx` features with `compress`
## [1.0.0-alpha.5] - 2019-12-09
## 1.0.0-alpha.5 - 2019-12-09
### Fixed ### Fixed
* Check `Upgrade` service readiness before calling it * Check `Upgrade` service readiness before calling it
* Fix buffer remaining capacity calculation
* Fix buffer remaining capacity calcualtion
### Changed ### Changed
* Websockets: Ping and Pong should have binary data #1049 * Websockets: Ping and Pong should have binary data #1049
## [1.0.0-alpha.4] - 2019-12-08
## 1.0.0-alpha.4 - 2019-12-08
### Added ### Added
* Add impl ResponseBuilder for Error * Add impl ResponseBuilder for Error
### Changed ### Changed
* Use rust based brotli compression library * Use rust based brotli compression library
## [1.0.0-alpha.3] - 2019-12-07 ## 1.0.0-alpha.3 - 2019-12-07
### Changed ### Changed
* Migrate to tokio 0.2 * Migrate to tokio 0.2
* Migrate to `std::future` * Migrate to `std::future`
## [0.2.11] - 2019-11-06 ## 0.2.11 - 2019-11-06
### Added ### Added
* Add support for serde_json::Value to be passed as argument to ResponseBuilder.body() * Add support for serde_json::Value to be passed as argument to ResponseBuilder.body()
* Add an additional `filename*` param in the `Content-Disposition` header of
* Add an additional `filename*` param in the `Content-Disposition` header of `actix_files::NamedFile` to be more compatible. (#1151) `actix_files::NamedFile` to be more compatible. (#1151)
* Allow to use `std::convert::Infallible` as `actix_http::error::Error` * Allow to use `std::convert::Infallible` as `actix_http::error::Error`
### Fixed ### Fixed
* To be compatible with non-English error responses, `ResponseError` rendered with `text/plain;
charset=utf-8` header [#1118]
* To be compatible with non-English error responses, `ResponseError` rendered with `text/plain; charset=utf-8` header #1118 [#1878]: https://github.com/actix/actix-web/pull/1878
## [0.2.10] - 2019-09-11 ## 0.2.10 - 2019-09-11
### Added ### Added
* Add support for sending HTTP requests with `Rc<RequestHead>` in addition to sending HTTP requests
* Add support for sending HTTP requests with `Rc<RequestHead>` in addition to sending HTTP requests with `RequestHead` with `RequestHead`
### Fixed ### Fixed
* h2 will use error response #1080 * h2 will use error response #1080
* on_connect result isn't added to request extensions for http2 requests #1009 * on_connect result isn't added to request extensions for http2 requests #1009
## [0.2.9] - 2019-08-13 ## 0.2.9 - 2019-08-13
### Changed ### Changed
* Dropped the `byteorder`-dependency in favor of `stdlib`-implementation * Dropped the `byteorder`-dependency in favor of `stdlib`-implementation
* Update percent-encoding to 2.1 * Update percent-encoding to 2.1
* Update serde_urlencoded to 0.6.1 * Update serde_urlencoded to 0.6.1
### Fixed ### Fixed
* Fixed a panic in the HTTP2 handshake in client HTTP requests (#1031) * Fixed a panic in the HTTP2 handshake in client HTTP requests (#1031)
## [0.2.8] - 2019-08-01 ## 0.2.8 - 2019-08-01
### Added ### Added
* Add `rustls` support * Add `rustls` support
* Add `Clone` impl for `HeaderMap` * Add `Clone` impl for `HeaderMap`
### Fixed ### Fixed
* awc client panic #1016 * awc client panic #1016
* Invalid response with compression middleware enabled, but compression-related features
* Invalid response with compression middleware enabled, but compression-related features disabled #997 disabled #997
## [0.2.7] - 2019-07-18 ## 0.2.7 - 2019-07-18
### Added ### Added
* Add support for downcasting response errors #986 * Add support for downcasting response errors #986
## [0.2.6] - 2019-07-17 ## 0.2.6 - 2019-07-17
### Changed ### Changed
* Replace `ClonableService` with local copy * Replace `ClonableService` with local copy
* Upgrade `rand` dependency version to 0.7 * Upgrade `rand` dependency version to 0.7
## [0.2.5] - 2019-06-28 ## 0.2.5 - 2019-06-28
### Added ### Added
* Add `on-connect` callback, `HttpServiceBuilder::on_connect()` #946 * Add `on-connect` callback, `HttpServiceBuilder::on_connect()` #946
### Changed ### Changed
* Use `encoding_rs` crate instead of unmaintained `encoding` crate * Use `encoding_rs` crate instead of unmaintained `encoding` crate
* Add `Copy` and `Clone` impls for `ws::Codec` * Add `Copy` and `Clone` impls for `ws::Codec`
## [0.2.4] - 2019-06-16 ## 0.2.4 - 2019-06-16
### Fixed ### Fixed
* Do not compress NoContent (204) responses #918 * Do not compress NoContent (204) responses #918
## [0.2.3] - 2019-06-02 ## 0.2.3 - 2019-06-02
### Added ### Added
* Debug impl for ResponseBuilder * Debug impl for ResponseBuilder
* From SizedStream and BodyStream for Body * From SizedStream and BodyStream for Body
### Changed ### Changed
* SizedStream uses u64 * SizedStream uses u64
## [0.2.2] - 2019-05-29 ## 0.2.2 - 2019-05-29
### Fixed ### Fixed
* Parse incoming stream before closing stream on disconnect #868 * Parse incoming stream before closing stream on disconnect #868
## [0.2.1] - 2019-05-25 ## 0.2.1 - 2019-05-25
### Fixed ### Fixed
* Handle socket read disconnect * Handle socket read disconnect
## [0.2.0] - 2019-05-12 ## 0.2.0 - 2019-05-12
### Changed ### Changed
* Update actix-service to 0.4 * Update actix-service to 0.4
* Expect and upgrade services accept `ServerConfig` config. * Expect and upgrade services accept `ServerConfig` config.
### Deleted ### Deleted
* `OneRequest` service * `OneRequest` service
## [0.1.5] - 2019-05-04 ## 0.1.5 - 2019-05-04
### Fixed ### Fixed
* Clean up response extensions in response pool #817 * Clean up response extensions in response pool #817
## [0.1.4] - 2019-04-24 ## 0.1.4 - 2019-04-24
### Added ### Added
* Allow to render h1 request headers in `Camel-Case` * Allow to render h1 request headers in `Camel-Case`
### Fixed ### Fixed
* Read until eof for http/1.0 responses #771 * Read until eof for http/1.0 responses #771
## [0.1.3] - 2019-04-23 ## 0.1.3 - 2019-04-23
### Fixed ### Fixed
* Fix http client pool management * Fix http client pool management
* Fix http client wait queue management #794 * Fix http client wait queue management #794
## [0.1.2] - 2019-04-23 ## 0.1.2 - 2019-04-23
### Fixed ### Fixed
* Fix BorrowMutError panic in client connector #793 * Fix BorrowMutError panic in client connector #793
## [0.1.1] - 2019-04-19 ## 0.1.1 - 2019-04-19
### Changed ### Changed
* Cookie::max_age() accepts value in seconds * Cookie::max_age() accepts value in seconds
* Cookie::max_age_time() accepts value in time::Duration * Cookie::max_age_time() accepts value in time::Duration
* Allow to specify server address for client connector * Allow to specify server address for client connector
## [0.1.0] - 2019-04-16 ## 0.1.0 - 2019-04-16
### Added ### Added
* Expose peer addr via `Request::peer_addr()` and `RequestHead::peer_addr` * Expose peer addr via `Request::peer_addr()` and `RequestHead::peer_addr`
### Changed ### Changed
* `actix_http::encoding` always available * `actix_http::encoding` always available
* use trust-dns-resolver 0.11.0 * use trust-dns-resolver 0.11.0
## [0.1.0-alpha.5] - 2019-04-12 ## 0.1.0-alpha.5 - 2019-04-12
### Added ### Added
* Allow to use custom service for upgrade requests * Allow to use custom service for upgrade requests
* Added `h1::SendResponse` future. * Added `h1::SendResponse` future.
### Changed ### Changed
* MessageBody::length() renamed to MessageBody::size() for consistency * MessageBody::length() renamed to MessageBody::size() for consistency
* ws handshake verification functions take RequestHead instead of Request * ws handshake verification functions take RequestHead instead of Request
## [0.1.0-alpha.4] - 2019-04-08 ## 0.1.0-alpha.4 - 2019-04-08
### Added ### Added
* Allow to use custom `Expect` handler * Allow to use custom `Expect` handler
* Add minimal `std::error::Error` impl for `Error` * Add minimal `std::error::Error` impl for `Error`
### Changed ### Changed
* Export IntoHeaderValue * Export IntoHeaderValue
* Render error and return as response body * Render error and return as response body
* Use thread pool for response body compression
* Use thread pool for response body comression
### Deleted ### Deleted
* Removed PayloadBuffer * Removed PayloadBuffer
## [0.1.0-alpha.3] - 2019-04-02 ## 0.1.0-alpha.3 - 2019-04-02
### Added ### Added
* Warn when an unsealed private cookie isn't valid UTF-8 * Warn when an unsealed private cookie isn't valid UTF-8
### Fixed ### Fixed
* Rust 1.31.0 compatibility * Rust 1.31.0 compatibility
* Preallocate read buffer for h1 codec * Preallocate read buffer for h1 codec
* Detect socket disconnection during protocol selection * Detect socket disconnection during protocol selection
## [0.1.0-alpha.2] - 2019-03-29 ## 0.1.0-alpha.2 - 2019-03-29
### Added ### Added
* Added ws::Message::Nop, no-op websockets message * Added ws::Message::Nop, no-op websockets message
### Changed ### Changed
* Do not use thread pool for decompression if chunk size is smaller than 2048.
* Do not use thread pool for decomression if chunk size is smaller than 2048.
## [0.1.0-alpha.1] - 2019-03-28 ## 0.1.0-alpha.1 - 2019-03-28
* Initial impl * Initial impl


@ -1,46 +0,0 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/


@ -1,8 +1,8 @@
[package] [package]
name = "actix-http" name = "actix-http"
version = "2.0.0-beta.1" version = "3.0.0-beta.2"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"] authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix HTTP primitives" description = "HTTP primitives for the Actix ecosystem"
readme = "README.md" readme = "README.md"
keywords = ["actix", "http", "framework", "async", "futures"] keywords = ["actix", "http", "framework", "async", "futures"]
homepage = "https://actix.rs" homepage = "https://actix.rs"
@ -25,10 +25,10 @@ path = "src/lib.rs"
default = [] default = []
# openssl # openssl
openssl = ["actix-tls/openssl", "actix-connect/openssl"] openssl = ["actix-tls/openssl"]
# rustls support # rustls support
rustls = ["actix-tls/rustls", "actix-connect/rustls"] rustls = ["actix-tls/rustls"]
# enable compression support # enable compression support
compress = ["flate2", "brotli2"] compress = ["flate2", "brotli2"]
@ -36,33 +36,29 @@ compress = ["flate2", "brotli2"]
# support for secure cookies # support for secure cookies
secure-cookies = ["cookie/secure"] secure-cookies = ["cookie/secure"]
# support for actix Actor messages # trust-dns as client dns resolver
actors = ["actix"] trust-dns = ["trust-dns-resolver"]
[dependencies] [dependencies]
actix-service = "1.0.5" actix-service = "2.0.0-beta.4"
actix-codec = "0.2.0" actix-codec = "0.4.0-beta.1"
actix-connect = "2.0.0-alpha.3" actix-utils = "3.0.0-beta.2"
actix-utils = "1.0.6" actix-rt = "2"
actix-rt = "1.0.0" actix-tls = "3.0.0-beta.2"
actix-threadpool = "0.3.1"
actix-tls = { version = "2.0.0-alpha.1", optional = true }
actix = { version = "0.10.0-alpha.1", optional = true }
base64 = "0.12" base64 = "0.13"
bitflags = "1.2" bitflags = "1.2"
bytes = "0.5.3" bytes = "1"
bytestring = "1"
cookie = { version = "0.14.1", features = ["percent-encode"] } cookie = { version = "0.14.1", features = ["percent-encode"] }
copyless = "0.1.4" derive_more = "0.99.5"
derive_more = "0.99.2"
either = "1.5.3"
encoding_rs = "0.8" encoding_rs = "0.8"
futures-channel = { version = "0.3.5", default-features = false } futures-channel = { version = "0.3.7", default-features = false, features = ["alloc"] }
futures-core = { version = "0.3.5", default-features = false } futures-core = { version = "0.3.7", default-features = false, features = ["alloc"] }
futures-util = { version = "0.3.5", default-features = false } futures-util = { version = "0.3.7", default-features = false, features = ["alloc", "sink"] }
fxhash = "0.2.1" ahash = "0.7"
h2 = "0.2.1" h2 = "0.3.0"
http = "0.2.0" http = "0.2.2"
httparse = "1.3" httparse = "1.3"
indexmap = "1.3" indexmap = "1.3"
itoa = "0.4" itoa = "0.4"
@ -71,35 +67,41 @@ language-tags = "0.2"
log = "0.4" log = "0.4"
mime = "0.3" mime = "0.3"
percent-encoding = "2.1" percent-encoding = "2.1"
pin-project = "0.4.17" pin-project = "1.0.0"
rand = "0.7" rand = "0.8"
regex = "1.3" regex = "1.3"
serde = "1.0" serde = "1.0"
serde_json = "1.0" serde_json = "1.0"
sha-1 = "0.9" sha-1 = "0.9"
smallvec = "1.6"
slab = "0.4" slab = "0.4"
serde_urlencoded = "0.6.1" serde_urlencoded = "0.7"
time = { version = "0.2.7", default-features = false, features = ["std"] } time = { version = "0.2.23", default-features = false, features = ["std"] }
# compression # compression
brotli2 = { version="0.3.2", optional = true } brotli2 = { version="0.3.2", optional = true }
flate2 = { version = "1.0.13", optional = true } flate2 = { version = "1.0.13", optional = true }
trust-dns-resolver = { version = "0.20.0", optional = true }
[dev-dependencies] [dev-dependencies]
actix-server = "1.0.1" actix-server = "2.0.0-beta.3"
actix-connect = { version = "2.0.0-alpha.2", features = ["openssl"] } actix-http-test = { version = "3.0.0-beta.2", features = ["openssl"] }
actix-http-test = { version = "2.0.0-alpha.1", features = ["openssl"] } actix-tls = { version = "3.0.0-beta.2", features = ["openssl"] }
actix-tls = { version = "2.0.0-alpha.1", features = ["openssl"] }
criterion = "0.3" criterion = "0.3"
env_logger = "0.7" env_logger = "0.8"
serde_derive = "1.0" serde_derive = "1.0"
open-ssl = { version="0.10", package = "openssl" } tls-openssl = { version = "0.10", package = "openssl" }
rust-tls = { version="0.17", package = "rustls" } tls-rustls = { version = "0.19", package = "rustls" }
[[bench]] [[bench]]
name = "content-length" name = "write-camel-case"
harness = false harness = false
[[bench]] [[bench]]
name = "status-line" name = "status-line"
harness = false harness = false
[[bench]]
name = "uninit-headers"
harness = false


@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-NOW Nikolay Kim
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

actix-http/LICENSE-APACHE Symbolic link

@ -0,0 +1 @@
../LICENSE-APACHE


@ -1,25 +0,0 @@
Copyright (c) 2017 Nikolay Kim
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

actix-http/LICENSE-MIT Symbolic link

@ -0,0 +1 @@
../LICENSE-MIT


@ -1,24 +1,30 @@
# Actix http [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-http)](https://crates.io/crates/actix-http) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) # actix-http
Actix http > HTTP primitives for the Actix ecosystem.
## Documentation & community resources [![crates.io](https://img.shields.io/crates/v/actix-http?label=latest)](https://crates.io/crates/actix-http)
[![Documentation](https://docs.rs/actix-http/badge.svg?version=3.0.0-beta.2)](https://docs.rs/actix-http/3.0.0-beta.2)
[![Version](https://img.shields.io/badge/rustc-1.46+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.46.html)
![MIT or Apache 2.0 licensed](https://img.shields.io/crates/l/actix-http.svg)
<br />
[![dependency status](https://deps.rs/crate/actix-http/3.0.0-beta.2/status.svg)](https://deps.rs/crate/actix-http/3.0.0-beta.2)
[![Download](https://img.shields.io/crates/d/actix-http.svg)](https://crates.io/crates/actix-http)
[![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
* [User Guide](https://actix.rs/docs/) ## Documentation & Resources
* [API Documentation](https://docs.rs/actix-http/)
* [Chat on gitter](https://gitter.im/actix/actix) - [API Documentation](https://docs.rs/actix-http)
* Cargo package: [actix-http](https://crates.io/crates/actix-http) - [Chat on Gitter](https://gitter.im/actix/actix-web)
* Minimum supported Rust version: 1.40 or later - Minimum Supported Rust Version (MSRV): 1.46.0
## Example ## Example
```rust ```rust
// see examples/framed_hello.rs for complete list of used crates.
use std::{env, io}; use std::{env, io};
use actix_http::{HttpService, Response}; use actix_http::{HttpService, Response};
use actix_server::Server; use actix_server::Server;
use futures::future; use futures_util::future;
use http::header::HeaderValue; use http::header::HeaderValue;
use log::info; use log::info;


@ -1,291 +0,0 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use bytes::BytesMut;
// benchmark sending all requests at the same time
fn bench_write_content_length(c: &mut Criterion) {
let mut group = c.benchmark_group("write_content_length");
let sizes = [
0, 1, 11, 83, 101, 653, 1001, 6323, 10001, 56329, 100001, 123456, 98724245,
4294967202,
];
for i in sizes.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_content_length(i, &mut b)
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_content_length(i, &mut b)
})
});
group.bench_with_input(BenchmarkId::new("itoa", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_itoa::write_content_length(i, &mut b)
})
});
}
group.finish();
}
criterion_group!(benches, bench_write_content_length);
criterion_main!(benches);
mod _itoa {
use bytes::{BufMut, BytesMut};
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
let mut buf = itoa::Buffer::new();
bytes.put_slice(b"\r\ncontent-length: ");
bytes.put_slice(buf.format(n).as_bytes());
bytes.put_slice(b"\r\n");
}
}
mod _new {
use bytes::{BufMut, BytesMut};
const DIGITS_START: u8 = b'0';
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
bytes.put_slice(b"\r\ncontent-length: ");
if n < 10 {
bytes.put_u8(DIGITS_START + (n as u8));
} else if n < 100 {
let n = n as u8;
let d10 = n / 10;
let d1 = n % 10;
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1000 {
let n = n as u16;
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 10_000 {
let n = n as u16;
let d1000 = (n / 1000) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 100_000 {
let n = n as u32;
let d10000 = (n / 10000) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1_000_000 {
let n = n as u32;
let d100000 = (n / 100000) as u8;
let d10000 = ((n / 10000) % 10) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100000);
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else {
write_usize(n, bytes);
}
bytes.put_slice(b"\r\n");
}
fn write_usize(n: usize, bytes: &mut BytesMut) {
let mut n = n;
// 20 chars is max length of a usize (2^64)
// digits will be added to the buffer from lsd to msd
let mut buf = BytesMut::with_capacity(20);
while n > 9 {
// "pop" the least-significant digit
let lsd = (n % 10) as u8;
// remove the lsd from n
n = n / 10;
buf.put_u8(DIGITS_START + lsd);
}
// put msd to result buffer
bytes.put_u8(DIGITS_START + (n as u8));
// put, in reverse (msd to lsd), remaining digits to buffer
for i in (0..buf.len()).rev() {
bytes.put_u8(buf[i]);
}
}
}
mod _original {
use std::{mem, ptr, slice};
use bytes::{BufMut, BytesMut};
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
if n < 10 {
let mut buf: [u8; 21] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'\r', b'\n',
];
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else if n < 100 {
let mut buf: [u8; 22] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'\r', b'\n',
];
let d1 = n << 1;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(18),
2,
);
}
bytes.put_slice(&buf);
} else if n < 1000 {
let mut buf: [u8; 23] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'0', b'\r',
b'\n',
];
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(19),
2,
)
};
// decode last 1
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else {
bytes.put_slice(b"\r\ncontent-length: ");
convert_usize(n, bytes);
}
}
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
let mut curr: isize = 39;
let mut buf: [u8; 41] = unsafe { mem::MaybeUninit::uninit().assume_init() };
buf[39] = b'\r';
buf[40] = b'\n';
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
// eagerly decode 4 characters at a time
while n >= 10_000 {
let rem = (n % 10_000) as isize;
n /= 10_000;
let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1;
curr -= 4;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
ptr::copy_nonoverlapping(
lut_ptr.offset(d2),
buf_ptr.offset(curr + 2),
2,
);
}
}
// if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
unsafe {
bytes.extend_from_slice(slice::from_raw_parts(
buf_ptr.offset(curr),
41 - curr as usize,
));
}
}
}


@ -176,7 +176,7 @@ mod _original {
buf[5] = b'0'; buf[5] = b'0';
buf[7] = b'9'; buf[7] = b'9';
} }
_ => (), _ => {}
} }
let mut curr: isize = 12; let mut curr: isize = 12;


@ -0,0 +1,137 @@
use criterion::{criterion_group, criterion_main, Criterion};
use bytes::BytesMut;
// A Miri run detects UB, seen on this playground:
// https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=f5d9aa166aa48df8dca05fce2b6c3915
fn bench_header_parsing(c: &mut Criterion) {
c.bench_function("Original (Unsound) [short]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ_SHORT);
_original::parse_headers(&mut buf);
})
});
c.bench_function("New (safe) [short]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ_SHORT);
_new::parse_headers(&mut buf);
})
});
c.bench_function("Original (Unsound) [realistic]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ);
_original::parse_headers(&mut buf);
})
});
c.bench_function("New (safe) [realistic]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ);
_new::parse_headers(&mut buf);
})
});
}
criterion_group!(benches, bench_header_parsing);
criterion_main!(benches);
const MAX_HEADERS: usize = 96;
const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] =
[httparse::EMPTY_HEADER; MAX_HEADERS];
#[derive(Clone, Copy)]
struct HeaderIndex {
name: (usize, usize),
value: (usize, usize),
}
const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
name: (0, 0),
value: (0, 0),
};
const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] =
[EMPTY_HEADER_INDEX; MAX_HEADERS];
impl HeaderIndex {
fn record(
bytes: &[u8],
headers: &[httparse::Header<'_>],
indices: &mut [HeaderIndex],
) {
let bytes_ptr = bytes.as_ptr() as usize;
for (header, indices) in headers.iter().zip(indices.iter_mut()) {
let name_start = header.name.as_ptr() as usize - bytes_ptr;
let name_end = name_start + header.name.len();
indices.name = (name_start, name_end);
let value_start = header.value.as_ptr() as usize - bytes_ptr;
let value_end = value_start + header.value.len();
indices.value = (value_start, value_end);
}
}
}
// test cases taken from:
// https://github.com/seanmonstar/httparse/blob/master/benches/parse.rs
const REQ_SHORT: &'static [u8] = b"\
GET / HTTP/1.0\r\n\
Host: example.com\r\n\
Cookie: session=60; user_id=1\r\n\r\n";
const REQ: &'static [u8] = b"\
GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n\
Host: www.kittyhell.com\r\n\
User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n\
Accept-Language: ja,en-us;q=0.7,en;q=0.3\r\n\
Accept-Encoding: gzip,deflate\r\n\
Accept-Charset: Shift_JIS,utf-8;q=0.7,*;q=0.7\r\n\
Keep-Alive: 115\r\n\
Connection: keep-alive\r\n\
Cookie: wp_ozh_wsa_visits=2; wp_ozh_wsa_visit_lasttime=xxxxxxxxxx; __utma=xxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.x; __utmz=xxxxxxxxx.xxxxxxxxxx.x.x.utmccn=(referral)|utmcsr=reader.livedoor.com|utmcct=/reader/|utmcmd=referral|padding=under256\r\n\r\n";
mod _new {
use super::*;
pub fn parse_headers(src: &mut BytesMut) -> usize {
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
let mut req = httparse::Request::new(&mut parsed);
match req.parse(src).unwrap() {
httparse::Status::Complete(_len) => {
HeaderIndex::record(src, req.headers, &mut headers);
req.headers.len()
}
_ => unreachable!(),
}
}
}
mod _original {
use super::*;
use std::mem::MaybeUninit;
pub fn parse_headers(src: &mut BytesMut) -> usize {
let mut headers: [HeaderIndex; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };
let mut req = httparse::Request::new(&mut parsed);
match req.parse(src).unwrap() {
httparse::Status::Complete(_len) => {
HeaderIndex::record(src, req.headers, &mut headers);
req.headers.len()
}
_ => unreachable!(),
}
}
}


@ -0,0 +1,89 @@
use criterion::{black_box, criterion_group, criterion_main, BenchmarkId, Criterion};
fn bench_write_camel_case(c: &mut Criterion) {
let mut group = c.benchmark_group("write_camel_case");
let names = ["connection", "Transfer-Encoding", "transfer-encoding"];
for &i in &names {
let bts = i.as_bytes();
group.bench_with_input(BenchmarkId::new("Original", i), bts, |b, bts| {
b.iter(|| {
let mut buf = black_box([0; 24]);
_original::write_camel_case(black_box(bts), &mut buf)
});
});
group.bench_with_input(BenchmarkId::new("New", i), bts, |b, bts| {
b.iter(|| {
let mut buf = black_box([0; 24]);
_new::write_camel_case(black_box(bts), &mut buf)
});
});
}
group.finish();
}
criterion_group!(benches, bench_write_camel_case);
criterion_main!(benches);
mod _new {
pub fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
// first copy entire (potentially wrong) slice to output
buffer[..value.len()].copy_from_slice(value);
let mut iter = value.iter();
// first character should be uppercase
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[0] = c & 0b1101_1111;
}
// track 1 ahead of the current position since that's the location being assigned to
let mut index = 2;
// remaining characters after hyphens should also be uppercase
while let Some(&c) = iter.next() {
if c == b'-' {
// advance iter by one and uppercase if needed
if let Some(c @ b'a'..=b'z') = iter.next() {
buffer[index] = c & 0b1101_1111;
}
}
index += 1;
}
}
}
mod _original {
pub fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
let mut index = 0;
let key = value;
let mut key_iter = key.iter();
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
} else {
return;
}
while let Some(c) = key_iter.next() {
buffer[index] = *c;
index += 1;
if *c == b'-' {
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
}
}
}
}
}


@ -26,7 +26,10 @@ async fn main() -> io::Result<()> {
info!("request body: {:?}", body); info!("request body: {:?}", body);
Ok::<_, Error>( Ok::<_, Error>(
Response::Ok() Response::Ok()
.header("x-head", HeaderValue::from_static("dummy value!")) .insert_header((
"x-head",
HeaderValue::from_static("dummy value!"),
))
.body(body), .body(body),
) )
}) })


@ -15,7 +15,7 @@ async fn handle_request(mut req: Request) -> Result<Response, Error> {
info!("request body: {:?}", body); info!("request body: {:?}", body);
Ok(Response::Ok() Ok(Response::Ok()
.header("x-head", HeaderValue::from_static("dummy value!")) .insert_header(("x-head", HeaderValue::from_static("dummy value!")))
.body(body)) .body(body))
} }


@ -19,7 +19,10 @@ async fn main() -> io::Result<()> {
.finish(|_req| { .finish(|_req| {
info!("{:?}", _req); info!("{:?}", _req);
let mut res = Response::Ok(); let mut res = Response::Ok();
res.header("x-head", HeaderValue::from_static("dummy value!")); res.insert_header((
"x-head",
HeaderValue::from_static("dummy value!"),
));
future::ok::<_, ()>(res.body("Hello world!")) future::ok::<_, ()>(res.body("Hello world!"))
}) })
.tcp() .tcp()


@ -1,11 +1,9 @@
use std::marker::PhantomData;
use std::pin::Pin; use std::pin::Pin;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use std::{fmt, mem}; use std::{fmt, mem};
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use futures_core::Stream; use futures_core::{ready, Stream};
use futures_util::ready;
use pin_project::pin_project; use pin_project::pin_project;
use crate::error::Error; use crate::error::Error;
@ -21,12 +19,7 @@ pub enum BodySize {
impl BodySize { impl BodySize {
pub fn is_eof(&self) -> bool { pub fn is_eof(&self) -> bool {
match self { matches!(self, BodySize::None | BodySize::Empty | BodySize::Sized(0))
BodySize::None
| BodySize::Empty
| BodySize::Sized(0) => true,
_ => false,
}
} }
} }
@ -73,7 +66,7 @@ impl<T: MessageBody + Unpin> MessageBody for Box<T> {
#[pin_project(project = ResponseBodyProj)] #[pin_project(project = ResponseBodyProj)]
pub enum ResponseBody<B> { pub enum ResponseBody<B> {
Body(#[pin] B), Body(#[pin] B),
Other(#[pin] Body), Other(Body),
} }
impl ResponseBody<Body> { impl ResponseBody<Body> {
@ -115,7 +108,7 @@ impl<B: MessageBody> MessageBody for ResponseBody<B> {
) -> Poll<Option<Result<Bytes, Error>>> { ) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() { match self.project() {
ResponseBodyProj::Body(body) => body.poll_next(cx), ResponseBodyProj::Body(body) => body.poll_next(cx),
ResponseBodyProj::Other(body) => body.poll_next(cx), ResponseBodyProj::Other(body) => Pin::new(body).poll_next(cx),
} }
} }
} }
@ -129,12 +122,11 @@ impl<B: MessageBody> Stream for ResponseBody<B> {
) -> Poll<Option<Self::Item>> { ) -> Poll<Option<Self::Item>> {
match self.project() { match self.project() {
ResponseBodyProj::Body(body) => body.poll_next(cx), ResponseBodyProj::Body(body) => body.poll_next(cx),
ResponseBodyProj::Other(body) => body.poll_next(cx), ResponseBodyProj::Other(body) => Pin::new(body).poll_next(cx),
} }
} }
} }
#[pin_project(project = BodyProj)]
/// Represents various types of http message body. /// Represents various types of http message body.
pub enum Body { pub enum Body {
/// Empty response. `Content-Length` header is not set. /// Empty response. `Content-Length` header is not set.
@ -173,10 +165,10 @@ impl MessageBody for Body {
self: Pin<&mut Self>, self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> { ) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() { match self.get_mut() {
BodyProj::None => Poll::Ready(None), Body::None => Poll::Ready(None),
BodyProj::Empty => Poll::Ready(None), Body::Empty => Poll::Ready(None),
BodyProj::Bytes(ref mut bin) => { Body::Bytes(ref mut bin) => {
let len = bin.len(); let len = bin.len();
if len == 0 { if len == 0 {
Poll::Ready(None) Poll::Ready(None)
@ -184,7 +176,7 @@ impl MessageBody for Body {
Poll::Ready(Some(Ok(mem::take(bin)))) Poll::Ready(Some(Ok(mem::take(bin))))
} }
} }
BodyProj::Message(ref mut body) => Pin::new(body.as_mut()).poll_next(cx), Body::Message(body) => Pin::new(&mut **body).poll_next(cx),
} }
} }
} }
@ -192,14 +184,8 @@ impl MessageBody for Body {
impl PartialEq for Body { impl PartialEq for Body {
fn eq(&self, other: &Body) -> bool { fn eq(&self, other: &Body) -> bool {
match *self { match *self {
Body::None => match *other { Body::None => matches!(*other, Body::None),
Body::None => true, Body::Empty => matches!(*other, Body::Empty),
_ => false,
},
Body::Empty => match *other {
Body::Empty => true,
_ => false,
},
Body::Bytes(ref b) => match *other { Body::Bytes(ref b) => match *other {
Body::Bytes(ref b2) => b == b2, Body::Bytes(ref b2) => b == b2,
_ => false, _ => false,
@ -277,12 +263,12 @@ where
} }
} }
impl<S, E> From<BodyStream<S, E>> for Body impl<S, E> From<BodyStream<S>> for Body
where where
S: Stream<Item = Result<Bytes, E>> + Unpin + 'static, S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
E: Into<Error> + 'static, E: Into<Error> + 'static,
{ {
fn from(s: BodyStream<S, E>) -> Body { fn from(s: BodyStream<S>) -> Body {
Body::from_message(s) Body::from_message(s)
} }
} }
@ -378,27 +364,21 @@ impl MessageBody for String {
/// Type represent streaming body. /// Type represent streaming body.
/// Response does not contain `content-length` header and appropriate transfer encoding is used. /// Response does not contain `content-length` header and appropriate transfer encoding is used.
#[pin_project] pub struct BodyStream<S: Unpin> {
pub struct BodyStream<S: Unpin, E> {
#[pin]
stream: S, stream: S,
_t: PhantomData<E>,
} }
impl<S, E> BodyStream<S, E> impl<S, E> BodyStream<S>
where where
S: Stream<Item = Result<Bytes, E>> + Unpin, S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>, E: Into<Error>,
{ {
pub fn new(stream: S) -> Self { pub fn new(stream: S) -> Self {
BodyStream { BodyStream { stream }
stream,
_t: PhantomData,
}
} }
} }
impl<S, E> MessageBody for BodyStream<S, E> impl<S, E> MessageBody for BodyStream<S>
where where
S: Stream<Item = Result<Bytes, E>> + Unpin, S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>, E: Into<Error>,
@ -413,13 +393,12 @@ where
/// ended on a zero-length chunk, but rather proceed until the underlying /// ended on a zero-length chunk, but rather proceed until the underlying
/// [`Stream`] ends. /// [`Stream`] ends.
fn poll_next( fn poll_next(
self: Pin<&mut Self>, mut self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> { ) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream = self.project().stream;
loop { loop {
let stream = stream.as_mut(); let stream = &mut self.as_mut().stream;
return Poll::Ready(match ready!(stream.poll_next(cx)) { return Poll::Ready(match ready!(Pin::new(stream).poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue, Some(Ok(ref bytes)) if bytes.is_empty() => continue,
opt => opt.map(|res| res.map_err(Into::into)), opt => opt.map(|res| res.map_err(Into::into)),
}); });
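In the `BodyStream` hunks above the error parameter `E` disappears from the struct (and with it the `PhantomData<E>` field) because `E` is now mentioned only in trait bounds, where it is fixed through the stream's item type. A hedged sketch of the same shape on a plain iterator:

    use std::marker::PhantomData;

    // Old shape: `E` existed only to parameterize the struct, so it had to be
    // carried around as a `PhantomData` field.
    #[allow(dead_code)]
    struct OldWrapper<S, E> {
        stream: S,
        _t: PhantomData<E>,
    }

    // New shape: the struct stores only the stream; `E` appears purely in the
    // impl bounds, where it is pinned down through `S`'s item type.
    struct Wrapper<S> {
        stream: S,
    }

    impl<S, E> Wrapper<S>
    where
        S: Iterator<Item = Result<u32, E>>, // stand-in for `Stream<Item = Result<Bytes, E>>`
    {
        fn next_ok(&mut self) -> Option<u32> {
            self.stream.next().and_then(Result::ok)
        }
    }

    fn main() {
        let mut w = Wrapper {
            stream: vec![Ok::<u32, ()>(1), Err(()), Ok(2)].into_iter(),
        };
        assert_eq!(w.next_ok(), Some(1));
        assert_eq!(w.next_ok(), None); // the Err(()) item is discarded
    }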
@ -429,10 +408,8 @@ where
/// Type represent streaming body. This body implementation should be used /// Type represent streaming body. This body implementation should be used
/// if total size of stream is known. Data get sent as is without using transfer encoding. /// if total size of stream is known. Data get sent as is without using transfer encoding.
#[pin_project]
pub struct SizedStream<S: Unpin> { pub struct SizedStream<S: Unpin> {
size: u64, size: u64,
#[pin]
stream: S, stream: S,
} }
@ -459,13 +436,12 @@ where
/// ended on a zero-length chunk, but rather proceed until the underlying /// ended on a zero-length chunk, but rather proceed until the underlying
/// [`Stream`] ends. /// [`Stream`] ends.
fn poll_next( fn poll_next(
self: Pin<&mut Self>, mut self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> { ) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream: Pin<&mut S> = self.project().stream;
loop { loop {
let stream = stream.as_mut(); let stream = &mut self.as_mut().stream;
return Poll::Ready(match ready!(stream.poll_next(cx)) { return Poll::Ready(match ready!(Pin::new(stream).poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue, Some(Ok(ref bytes)) if bytes.is_empty() => continue,
val => val, val => val,
}); });
@ -476,9 +452,9 @@ where
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use futures_util::stream;
use futures_util::future::poll_fn; use futures_util::future::poll_fn;
use futures_util::pin_mut; use futures_util::pin_mut;
use futures_util::stream;
impl Body { impl Body {
pub(crate) fn get_ref(&self) -> &[u8] { pub(crate) fn get_ref(&self) -> &[u8] {
@ -612,10 +588,6 @@ mod tests {
#[actix_rt::test] #[actix_rt::test]
async fn test_body_eq() { async fn test_body_eq() {
assert!(Body::None == Body::None);
assert!(Body::None != Body::Empty);
assert!(Body::Empty == Body::Empty);
assert!(Body::Empty != Body::None);
assert!( assert!(
Body::Bytes(Bytes::from_static(b"1")) Body::Bytes(Bytes::from_static(b"1"))
== Body::Bytes(Bytes::from_static(b"1")) == Body::Bytes(Bytes::from_static(b"1"))
@ -627,7 +599,7 @@ mod tests {
async fn test_body_debug() { async fn test_body_debug() {
assert!(format!("{:?}", Body::None).contains("Body::None")); assert!(format!("{:?}", Body::None).contains("Body::None"));
assert!(format!("{:?}", Body::Empty).contains("Body::Empty")); assert!(format!("{:?}", Body::Empty).contains("Body::Empty"));
assert!(format!("{:?}", Body::Bytes(Bytes::from_static(b"1"))).contains("1")); assert!(format!("{:?}", Body::Bytes(Bytes::from_static(b"1"))).contains('1'));
} }
#[actix_rt::test] #[actix_rt::test]
@ -729,7 +701,7 @@ mod tests {
let body = resp_body.downcast_ref::<String>().unwrap(); let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast"); assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap(); let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push_str("!"); body.push('!');
let body = resp_body.downcast_ref::<String>().unwrap(); let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!"); assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>(); let not_body = resp_body.downcast_ref::<()>();

View File

@ -10,16 +10,16 @@ use crate::config::{KeepAlive, ServiceConfig};
use crate::error::Error; use crate::error::Error;
use crate::h1::{Codec, ExpectHandler, H1Service, UpgradeHandler}; use crate::h1::{Codec, ExpectHandler, H1Service, UpgradeHandler};
use crate::h2::H2Service; use crate::h2::H2Service;
use crate::helpers::{Data, DataFactory};
use crate::request::Request; use crate::request::Request;
use crate::response::Response; use crate::response::Response;
use crate::service::HttpService; use crate::service::HttpService;
use crate::{ConnectCallback, Extensions};
/// A http service builder /// An HTTP service builder
/// ///
/// This type can be used to construct an instance of `http service` through a /// This type can be used to construct an instance of [`HttpService`] through a
/// builder-like pattern. /// builder-like pattern.
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler<T>> { pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler> {
keep_alive: KeepAlive, keep_alive: KeepAlive,
client_timeout: u64, client_timeout: u64,
client_disconnect: u64, client_disconnect: u64,
@ -27,16 +27,16 @@ pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler<T>> {
local_addr: Option<net::SocketAddr>, local_addr: Option<net::SocketAddr>,
expect: X, expect: X,
upgrade: Option<U>, upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>, on_connect_ext: Option<Rc<ConnectCallback<T>>>,
_t: PhantomData<(T, S)>, _phantom: PhantomData<S>,
} }
impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler<T>> impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler>
where where
S: ServiceFactory<Config = (), Request = Request>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error> + 'static, S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
<S::Service as Service>::Future: 'static, <S::Service as Service<Request>>::Future: 'static,
{ {
/// Create instance of `ServiceConfigBuilder` /// Create instance of `ServiceConfigBuilder`
pub fn new() -> Self { pub fn new() -> Self {
@ -48,26 +48,26 @@ where
local_addr: None, local_addr: None,
expect: ExpectHandler, expect: ExpectHandler,
upgrade: None, upgrade: None,
on_connect: None, on_connect_ext: None,
_t: PhantomData, _phantom: PhantomData,
} }
} }
} }
impl<T, S, X, U> HttpServiceBuilder<T, S, X, U> impl<T, S, X, U> HttpServiceBuilder<T, S, X, U>
where where
S: ServiceFactory<Config = (), Request = Request>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error> + 'static, S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
<S::Service as Service>::Future: 'static, <S::Service as Service<Request>>::Future: 'static,
X: ServiceFactory<Config = (), Request = Request, Response = Request>, X: ServiceFactory<Request, Config = (), Response = Request>,
X::Error: Into<Error>, X::Error: Into<Error>,
X::InitError: fmt::Debug, X::InitError: fmt::Debug,
<X::Service as Service>::Future: 'static, <X::Service as Service<Request>>::Future: 'static,
U: ServiceFactory<Config = (), Request = (Request, Framed<T, Codec>), Response = ()>, U: ServiceFactory<(Request, Framed<T, Codec>), Config = (), Response = ()>,
U::Error: fmt::Display, U::Error: fmt::Display,
U::InitError: fmt::Debug, U::InitError: fmt::Debug,
<U::Service as Service>::Future: 'static, <U::Service as Service<(Request, Framed<T, Codec>)>>::Future: 'static,
{ {
/// Set server keep-alive setting. /// Set server keep-alive setting.
/// ///
@ -123,11 +123,11 @@ where
/// request will be forwarded to main service. /// request will be forwarded to main service.
pub fn expect<F, X1>(self, expect: F) -> HttpServiceBuilder<T, S, X1, U> pub fn expect<F, X1>(self, expect: F) -> HttpServiceBuilder<T, S, X1, U>
where where
F: IntoServiceFactory<X1>, F: IntoServiceFactory<X1, Request>,
X1: ServiceFactory<Config = (), Request = Request, Response = Request>, X1: ServiceFactory<Request, Config = (), Response = Request>,
X1::Error: Into<Error>, X1::Error: Into<Error>,
X1::InitError: fmt::Debug, X1::InitError: fmt::Debug,
<X1::Service as Service>::Future: 'static, <X1::Service as Service<Request>>::Future: 'static,
{ {
HttpServiceBuilder { HttpServiceBuilder {
keep_alive: self.keep_alive, keep_alive: self.keep_alive,
@ -137,8 +137,8 @@ where
local_addr: self.local_addr, local_addr: self.local_addr,
expect: expect.into_factory(), expect: expect.into_factory(),
upgrade: self.upgrade, upgrade: self.upgrade,
on_connect: self.on_connect, on_connect_ext: self.on_connect_ext,
_t: PhantomData, _phantom: PhantomData,
} }
} }
@ -148,15 +148,11 @@ where
/// and this service get called with original request and framed object. /// and this service get called with original request and framed object.
pub fn upgrade<F, U1>(self, upgrade: F) -> HttpServiceBuilder<T, S, X, U1> pub fn upgrade<F, U1>(self, upgrade: F) -> HttpServiceBuilder<T, S, X, U1>
where where
F: IntoServiceFactory<U1>, F: IntoServiceFactory<U1, (Request, Framed<T, Codec>)>,
U1: ServiceFactory< U1: ServiceFactory<(Request, Framed<T, Codec>), Config = (), Response = ()>,
Config = (),
Request = (Request, Framed<T, Codec>),
Response = (),
>,
U1::Error: fmt::Display, U1::Error: fmt::Display,
U1::InitError: fmt::Debug, U1::InitError: fmt::Debug,
<U1::Service as Service>::Future: 'static, <U1::Service as Service<(Request, Framed<T, Codec>)>>::Future: 'static,
{ {
HttpServiceBuilder { HttpServiceBuilder {
keep_alive: self.keep_alive, keep_alive: self.keep_alive,
@ -166,29 +162,29 @@ where
local_addr: self.local_addr, local_addr: self.local_addr,
expect: self.expect, expect: self.expect,
upgrade: Some(upgrade.into_factory()), upgrade: Some(upgrade.into_factory()),
on_connect: self.on_connect, on_connect_ext: self.on_connect_ext,
_t: PhantomData, _phantom: PhantomData,
} }
} }
/// Set on-connect callback. /// Sets the callback to be run on connection establishment.
/// ///
/// It get called once per connection and result of the call /// Has mutable access to a data container that will be merged into request extensions.
/// get stored to the request's extensions. /// This enables transport layer data (like client certificates) to be accessed in middleware
pub fn on_connect<F, I>(mut self, f: F) -> Self /// and handlers.
pub fn on_connect_ext<F>(mut self, f: F) -> Self
where where
F: Fn(&T) -> I + 'static, F: Fn(&T, &mut Extensions) + 'static,
I: Clone + 'static,
{ {
self.on_connect = Some(Rc::new(move |io| Box::new(Data(f(io))))); self.on_connect_ext = Some(Rc::new(f));
self self
} }
/// Finish service configuration and create *http service* for HTTP/1 protocol. /// Finish service configuration and create an HTTP service for the HTTP/1 protocol.
pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U> pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U>
where where
B: MessageBody, B: MessageBody,
F: IntoServiceFactory<S>, F: IntoServiceFactory<S, Request>,
S::Error: Into<Error>, S::Error: Into<Error>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
@ -200,21 +196,22 @@ where
self.secure, self.secure,
self.local_addr, self.local_addr,
); );
H1Service::with_config(cfg, service.into_factory()) H1Service::with_config(cfg, service.into_factory())
.expect(self.expect) .expect(self.expect)
.upgrade(self.upgrade) .upgrade(self.upgrade)
.on_connect(self.on_connect) .on_connect_ext(self.on_connect_ext)
} }
/// Finish service configuration and create *http service* for HTTP/2 protocol. /// Finish service configuration and create an HTTP service for the HTTP/2 protocol.
pub fn h2<F, B>(self, service: F) -> H2Service<T, S, B> pub fn h2<F, B>(self, service: F) -> H2Service<T, S, B>
where where
B: MessageBody + 'static, B: MessageBody + 'static,
F: IntoServiceFactory<S>, F: IntoServiceFactory<S, Request>,
S::Error: Into<Error> + 'static, S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static, <S::Service as Service<Request>>::Future: 'static,
{ {
let cfg = ServiceConfig::new( let cfg = ServiceConfig::new(
self.keep_alive, self.keep_alive,
@ -223,18 +220,20 @@ where
self.secure, self.secure,
self.local_addr, self.local_addr,
); );
H2Service::with_config(cfg, service.into_factory()).on_connect(self.on_connect)
H2Service::with_config(cfg, service.into_factory())
.on_connect_ext(self.on_connect_ext)
} }
/// Finish service configuration and create `HttpService` instance. /// Finish service configuration and create `HttpService` instance.
pub fn finish<F, B>(self, service: F) -> HttpService<T, S, B, X, U> pub fn finish<F, B>(self, service: F) -> HttpService<T, S, B, X, U>
where where
B: MessageBody + 'static, B: MessageBody + 'static,
F: IntoServiceFactory<S>, F: IntoServiceFactory<S, Request>,
S::Error: Into<Error> + 'static, S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static, <S::Service as Service<Request>>::Future: 'static,
{ {
let cfg = ServiceConfig::new( let cfg = ServiceConfig::new(
self.keep_alive, self.keep_alive,
@ -243,9 +242,10 @@ where
self.secure, self.secure,
self.local_addr, self.local_addr,
); );
HttpService::with_config(cfg, service.into_factory()) HttpService::with_config(cfg, service.into_factory())
.expect(self.expect) .expect(self.expect)
.upgrade(self.upgrade) .upgrade(self.upgrade)
.on_connect(self.on_connect) .on_connect_ext(self.on_connect_ext)
} }
} }
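A rough usage sketch of the reworked callback, loosely modeled on the crate's examples; `ConnInfo`, the handler body, and the return type are illustrative assumptions, and the beta-period APIs may differ in detail:

    use actix_http::{Error, HttpService, Request, Response};
    use actix_rt::net::TcpStream;
    use futures_util::future::ok;

    #[derive(Clone, Debug)]
    struct ConnInfo {
        peer: Option<std::net::SocketAddr>,
    }

    // Build (but do not run) an HTTP/1 service whose handlers can read
    // connection-level data back out of the request extensions.
    fn make_service() -> impl actix_service::ServiceFactory<TcpStream> {
        HttpService::build()
            // runs once per accepted connection; whatever is inserted into `ext`
            // is merged into every request's extensions on that connection
            .on_connect_ext(|io: &TcpStream, ext| {
                ext.insert(ConnInfo { peer: io.peer_addr().ok() });
            })
            .finish(|req: Request| {
                let info = req.extensions().get::<ConnInfo>().cloned();
                ok::<_, Error>(Response::Ok().body(format!("{:?}", info)))
            })
            .tcp()
    }

    fn main() {
        let _ = make_service();
    }

Binding this factory with actix-server, as in the crate's hello-world style examples, would actually serve it; that part is omitted here.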

View File

@ -1,8 +1,7 @@
use std::time::Duration; use std::time::Duration;
// These values are taken from hyper/src/proto/h2/client.rs const DEFAULT_H2_CONN_WINDOW: u32 = 1024 * 1024 * 2; // 2MB
const DEFAULT_H2_CONN_WINDOW: u32 = 1024 * 1024 * 2; // 2mb const DEFAULT_H2_STREAM_WINDOW: u32 = 1024 * 1024; // 1MB
const DEFAULT_H2_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb
/// Connector configuration /// Connector configuration
#[derive(Clone)] #[derive(Clone)]
@ -19,7 +18,7 @@ pub(crate) struct ConnectorConfig {
impl Default for ConnectorConfig { impl Default for ConnectorConfig {
fn default() -> Self { fn default() -> Self {
Self { Self {
timeout: Duration::from_secs(1), timeout: Duration::from_secs(5),
conn_lifetime: Duration::from_secs(75), conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15), conn_keep_alive: Duration::from_secs(15),
disconnect_timeout: Some(Duration::from_millis(3000)), disconnect_timeout: Some(Duration::from_millis(3000)),

View File

@ -1,11 +1,14 @@
use std::future::Future; use std::future::Future;
use std::ops::{Deref, DerefMut};
use std::pin::Pin; use std::pin::Pin;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use std::{fmt, io, mem, time}; use std::{fmt, io, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed}; use actix_codec::{AsyncRead, AsyncWrite, Framed, ReadBuf};
use bytes::{Buf, Bytes}; use actix_rt::task::JoinHandle;
use futures_util::future::{err, Either, FutureExt, LocalBoxFuture, Ready}; use bytes::Bytes;
use futures_core::future::LocalBoxFuture;
use futures_util::future::{err, Either, FutureExt, Ready};
use h2::client::SendRequest; use h2::client::SendRequest;
use pin_project::pin_project; use pin_project::pin_project;
@ -20,7 +23,53 @@ use super::{h1proto, h2proto};
pub(crate) enum ConnectionType<Io> { pub(crate) enum ConnectionType<Io> {
H1(Io), H1(Io),
H2(SendRequest<Bytes>), H2(H2Connection),
}
// An h2 connection has two parts: SendRequest and Connection.
// Connection is spawned as an async task on the runtime, and H2Connection holds a handle to
// that task so it can wake it up and abort it when the SendRequest is dropped.
pub(crate) struct H2Connection {
handle: JoinHandle<()>,
sender: SendRequest<Bytes>,
}
impl H2Connection {
pub(crate) fn new<Io>(
sender: SendRequest<Bytes>,
connection: h2::client::Connection<Io>,
) -> Self
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
let handle = actix_rt::spawn(async move {
let _ = connection.await;
});
Self { handle, sender }
}
}
// abort the spawned connection task when the handle is dropped
impl Drop for H2Connection {
fn drop(&mut self) {
self.handle.abort();
}
}
// only expose the sender type publicly.
impl Deref for H2Connection {
type Target = SendRequest<Bytes>;
fn deref(&self) -> &Self::Target {
&self.sender
}
}
impl DerefMut for H2Connection {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.sender
}
} }
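The new `H2Connection` above ties the lifetime of the spawned h2 driver task to the client handle by aborting the `JoinHandle` in `Drop`. A generalized sketch of that pattern (independent of h2), assuming the actix-rt 2.x API used in the hunk:

    use std::time::Duration;

    use actix_rt::task::JoinHandle;

    // Own the background task's JoinHandle next to the user-facing handle and
    // abort it when the wrapper drops, so the task cannot outlive its owner.
    struct Driver {
        handle: JoinHandle<()>,
    }

    impl Driver {
        fn spawn() -> Self {
            let handle = actix_rt::spawn(async {
                // stand-in for `h2::client::Connection`: a long-running background job
                actix_rt::time::sleep(Duration::from_secs(3600)).await;
            });
            Self { handle }
        }
    }

    impl Drop for Driver {
        fn drop(&mut self) {
            // equivalent to H2Connection::drop: cancel the background task
            self.handle.abort();
        }
    }

    #[actix_rt::main]
    async fn main() {
        let driver = Driver::spawn();
        drop(driver); // the spawned job is aborted here
    }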
pub trait Connection { pub trait Connection {
@ -46,10 +95,10 @@ pub trait Connection {
pub(crate) trait ConnectionLifetime: AsyncRead + AsyncWrite + 'static { pub(crate) trait ConnectionLifetime: AsyncRead + AsyncWrite + 'static {
/// Close connection /// Close connection
fn close(&mut self); fn close(self: Pin<&mut Self>);
/// Release connection to the connection pool /// Release connection to the connection pool
fn release(&mut self); fn release(self: Pin<&mut Self>);
} }
#[doc(hidden)] #[doc(hidden)]
@ -195,11 +244,15 @@ where
match self { match self {
EitherConnection::A(con) => con EitherConnection::A(con) => con
.open_tunnel(head) .open_tunnel(head)
.map(|res| res.map(|(head, framed)| (head, framed.map_io(EitherIo::A)))) .map(|res| {
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::A)))
})
.boxed_local(), .boxed_local(),
EitherConnection::B(con) => con EitherConnection::B(con) => con
.open_tunnel(head) .open_tunnel(head)
.map(|res| res.map(|(head, framed)| (head, framed.map_io(EitherIo::B)))) .map(|res| {
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::B)))
})
.boxed_local(), .boxed_local(),
} }
} }
@ -219,23 +272,13 @@ where
fn poll_read( fn poll_read(
self: Pin<&mut Self>, self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
buf: &mut [u8], buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<usize>> { ) -> Poll<io::Result<()>> {
match self.project() { match self.project() {
EitherIoProj::A(val) => val.poll_read(cx, buf), EitherIoProj::A(val) => val.poll_read(cx, buf),
EitherIoProj::B(val) => val.poll_read(cx, buf), EitherIoProj::B(val) => val.poll_read(cx, buf),
} }
} }
unsafe fn prepare_uninitialized_buffer(
&self,
buf: &mut [mem::MaybeUninit<u8>],
) -> bool {
match self {
EitherIo::A(ref val) => val.prepare_uninitialized_buffer(buf),
EitherIo::B(ref val) => val.prepare_uninitialized_buffer(buf),
}
}
} }
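This `poll_read` hunk is part of the tokio 1.0 migration: data is now written into a `ReadBuf` that tracks how much was filled, so the method returns `io::Result<()>` instead of a byte count, and `prepare_uninitialized_buffer` is gone. A minimal sketch of an `AsyncRead` impl in the new shape (the `Canned` type is made up):

    use std::io;
    use std::pin::Pin;
    use std::task::{Context, Poll};

    use actix_codec::{AsyncRead, ReadBuf};

    // A trivial reader that hands out a fixed byte slice.
    struct Canned(&'static [u8]);

    impl AsyncRead for Canned {
        fn poll_read(
            mut self: Pin<&mut Self>,
            _cx: &mut Context<'_>,
            buf: &mut ReadBuf<'_>,
        ) -> Poll<io::Result<()>> {
            let data = self.0; // copy the 'static slice out before splitting it
            let n = data.len().min(buf.remaining());
            let (head, tail) = data.split_at(n);
            buf.put_slice(head);
            self.0 = tail;
            Poll::Ready(Ok(()))
        }
    }

    #[actix_rt::main]
    async fn main() {
        use futures_util::future::poll_fn;

        let mut src = Canned(&b"hello"[..]);
        let mut storage = [0u8; 8];
        let mut buf = ReadBuf::new(&mut storage);
        poll_fn(|cx| Pin::new(&mut src).poll_read(cx, &mut buf)).await.unwrap();
        assert_eq!(buf.filled(), &b"hello"[..]);
    }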
impl<A, B> AsyncWrite for EitherIo<A, B> impl<A, B> AsyncWrite for EitherIo<A, B>
@ -270,18 +313,36 @@ where
EitherIoProj::B(val) => val.poll_shutdown(cx), EitherIoProj::B(val) => val.poll_shutdown(cx),
} }
} }
}
fn poll_write_buf<U: Buf>( #[cfg(test)]
self: Pin<&mut Self>, mod test {
cx: &mut Context<'_>, use std::net;
buf: &mut U,
) -> Poll<Result<usize, io::Error>> use actix_rt::net::TcpStream;
where
Self: Sized, use super::*;
{
match self.project() { #[actix_rt::test]
EitherIoProj::A(val) => val.poll_write_buf(cx, buf), async fn test_h2_connection_drop() {
EitherIoProj::B(val) => val.poll_write_buf(cx, buf), let addr = "127.0.0.1:0".parse::<net::SocketAddr>().unwrap();
} let listener = net::TcpListener::bind(addr).unwrap();
let local = listener.local_addr().unwrap();
std::thread::spawn(move || while listener.accept().is_ok() {});
let tcp = TcpStream::connect(local).await.unwrap();
let (sender, connection) = h2::client::handshake(tcp).await.unwrap();
let conn = H2Connection::new(sender.clone(), connection);
assert!(sender.clone().ready().await.is_ok());
assert!(h2::client::SendRequest::clone(&*conn).ready().await.is_ok());
drop(conn);
match sender.ready().await {
Ok(_) => panic!("connection should be gone and can not be ready"),
Err(e) => assert!(e.is_io()),
};
} }
} }

View File

@ -3,11 +3,11 @@ use std::marker::PhantomData;
use std::time::Duration; use std::time::Duration;
use actix_codec::{AsyncRead, AsyncWrite}; use actix_codec::{AsyncRead, AsyncWrite};
use actix_connect::{
default_connector, Connect as TcpConnect, Connection as TcpConnection,
};
use actix_rt::net::TcpStream; use actix_rt::net::TcpStream;
use actix_service::{apply_fn, Service}; use actix_service::{apply_fn, Service, ServiceExt};
use actix_tls::connect::{
new_connector, Connect as TcpConnect, Connection as TcpConnection, Resolver,
};
use actix_utils::timeout::{TimeoutError, TimeoutService}; use actix_utils::timeout::{TimeoutError, TimeoutService};
use http::Uri; use http::Uri;
@ -18,10 +18,9 @@ use super::pool::{ConnectionPool, Protocol};
use super::Connect; use super::Connect;
#[cfg(feature = "openssl")] #[cfg(feature = "openssl")]
use actix_connect::ssl::openssl::SslConnector as OpensslConnector; use actix_tls::connect::ssl::openssl::SslConnector as OpensslConnector;
#[cfg(feature = "rustls")] #[cfg(feature = "rustls")]
use actix_connect::ssl::rustls::ClientConfig; use actix_tls::connect::ssl::rustls::ClientConfig;
#[cfg(feature = "rustls")] #[cfg(feature = "rustls")]
use std::sync::Arc; use std::sync::Arc;
@ -52,7 +51,7 @@ pub struct Connector<T, U> {
config: ConnectorConfig, config: ConnectorConfig,
#[allow(dead_code)] #[allow(dead_code)]
ssl: SslConnector, ssl: SslConnector,
_t: PhantomData<U>, _phantom: PhantomData<U>,
} }
trait Io: AsyncRead + AsyncWrite + Unpin {} trait Io: AsyncRead + AsyncWrite + Unpin {}
@ -62,24 +61,24 @@ impl Connector<(), ()> {
#[allow(clippy::new_ret_no_self, clippy::let_unit_value)] #[allow(clippy::new_ret_no_self, clippy::let_unit_value)]
pub fn new() -> Connector< pub fn new() -> Connector<
impl Service< impl Service<
Request = TcpConnect<Uri>, TcpConnect<Uri>,
Response = TcpConnection<Uri, TcpStream>, Response = TcpConnection<Uri, TcpStream>,
Error = actix_connect::ConnectError, Error = actix_tls::connect::ConnectError,
> + Clone, > + Clone,
TcpStream, TcpStream,
> { > {
Connector { Connector {
ssl: Self::build_ssl(vec![b"h2".to_vec(), b"http/1.1".to_vec()]), ssl: Self::build_ssl(vec![b"h2".to_vec(), b"http/1.1".to_vec()]),
connector: default_connector(), connector: new_connector(resolver::resolver()),
config: ConnectorConfig::default(), config: ConnectorConfig::default(),
_t: PhantomData, _phantom: PhantomData,
} }
} }
// Build Ssl connector with openssl, based on supplied alpn protocols // Build Ssl connector with openssl, based on supplied alpn protocols
#[cfg(feature = "openssl")] #[cfg(feature = "openssl")]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector { fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
use actix_connect::ssl::openssl::SslMethod; use actix_tls::connect::ssl::openssl::SslMethod;
use bytes::{BufMut, BytesMut}; use bytes::{BufMut, BytesMut};
let mut alpn = BytesMut::with_capacity(20); let mut alpn = BytesMut::with_capacity(20);
@ -100,9 +99,9 @@ impl Connector<(), ()> {
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector { fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
let mut config = ClientConfig::new(); let mut config = ClientConfig::new();
config.set_protocols(&protocols); config.set_protocols(&protocols);
config config.root_store.add_server_trust_anchors(
.root_store &actix_tls::connect::ssl::rustls::TLS_SERVER_ROOTS,
.add_server_trust_anchors(&actix_tls::rustls::TLS_SERVER_ROOTS); );
SslConnector::Rustls(Arc::new(config)) SslConnector::Rustls(Arc::new(config))
} }
@ -117,16 +116,16 @@ impl<T, U> Connector<T, U> {
where where
U1: AsyncRead + AsyncWrite + Unpin + fmt::Debug, U1: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
T1: Service< T1: Service<
Request = TcpConnect<Uri>, TcpConnect<Uri>,
Response = TcpConnection<Uri, U1>, Response = TcpConnection<Uri, U1>,
Error = actix_connect::ConnectError, Error = actix_tls::connect::ConnectError,
> + Clone, > + Clone,
{ {
Connector { Connector {
connector, connector,
config: self.config, config: self.config,
ssl: self.ssl, ssl: self.ssl,
_t: PhantomData, _phantom: PhantomData,
} }
} }
} }
@ -135,9 +134,9 @@ impl<T, U> Connector<T, U>
where where
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static, U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
T: Service< T: Service<
Request = TcpConnect<Uri>, TcpConnect<Uri>,
Response = TcpConnection<Uri, U>, Response = TcpConnection<Uri, U>,
Error = actix_connect::ConnectError, Error = actix_tls::connect::ConnectError,
> + Clone > + Clone
+ 'static, + 'static,
{ {
@ -241,8 +240,8 @@ where
/// its combinator chain. /// its combinator chain.
pub fn finish( pub fn finish(
self, self,
) -> impl Service<Request = Connect, Response = impl Connection, Error = ConnectError> ) -> impl Service<Connect, Response = impl Connection, Error = ConnectError> + Clone
+ Clone { {
#[cfg(not(any(feature = "openssl", feature = "rustls")))] #[cfg(not(any(feature = "openssl", feature = "rustls")))]
{ {
let connector = TimeoutService::new( let connector = TimeoutService::new(
@ -268,11 +267,11 @@ where
#[cfg(any(feature = "openssl", feature = "rustls"))] #[cfg(any(feature = "openssl", feature = "rustls"))]
{ {
const H2: &[u8] = b"h2"; const H2: &[u8] = b"h2";
#[cfg(feature = "openssl")]
use actix_connect::ssl::openssl::OpensslConnector;
#[cfg(feature = "rustls")]
use actix_connect::ssl::rustls::{RustlsConnector, Session};
use actix_service::{boxed::service, pipeline}; use actix_service::{boxed::service, pipeline};
#[cfg(feature = "openssl")]
use actix_tls::connect::ssl::openssl::OpensslConnector;
#[cfg(feature = "rustls")]
use actix_tls::connect::ssl::rustls::{RustlsConnector, Session};
let ssl_service = TimeoutService::new( let ssl_service = TimeoutService::new(
self.config.timeout, self.config.timeout,
@ -363,8 +362,7 @@ mod connect_impl {
pub(crate) struct InnerConnector<T, Io> pub(crate) struct InnerConnector<T, Io>
where where
Io: AsyncRead + AsyncWrite + Unpin + 'static, Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError> T: Service<Connect, Response = (Io, Protocol), Error = ConnectError> + 'static,
+ 'static,
{ {
pub(crate) tcp_pool: ConnectionPool<T, Io>, pub(crate) tcp_pool: ConnectionPool<T, Io>,
} }
@ -372,8 +370,7 @@ mod connect_impl {
impl<T, Io> Clone for InnerConnector<T, Io> impl<T, Io> Clone for InnerConnector<T, Io>
where where
Io: AsyncRead + AsyncWrite + Unpin + 'static, Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError> T: Service<Connect, Response = (Io, Protocol), Error = ConnectError> + 'static,
+ 'static,
{ {
fn clone(&self) -> Self { fn clone(&self) -> Self {
InnerConnector { InnerConnector {
@ -382,25 +379,23 @@ mod connect_impl {
} }
} }
impl<T, Io> Service for InnerConnector<T, Io> impl<T, Io> Service<Connect> for InnerConnector<T, Io>
where where
Io: AsyncRead + AsyncWrite + Unpin + 'static, Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError> T: Service<Connect, Response = (Io, Protocol), Error = ConnectError> + 'static,
+ 'static,
{ {
type Request = Connect;
type Response = IoConnection<Io>; type Response = IoConnection<Io>;
type Error = ConnectError; type Error = ConnectError;
type Future = Either< type Future = Either<
<ConnectionPool<T, Io> as Service>::Future, <ConnectionPool<T, Io> as Service<Connect>>::Future,
Ready<Result<IoConnection<Io>, ConnectError>>, Ready<Result<IoConnection<Io>, ConnectError>>,
>; >;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.tcp_pool.poll_ready(cx) self.tcp_pool.poll_ready(cx)
} }
fn call(&mut self, req: Connect) -> Self::Future { fn call(&self, req: Connect) -> Self::Future {
match req.uri.scheme_str() { match req.uri.scheme_str() {
Some("https") | Some("wss") => { Some("https") | Some("wss") => {
Either::Right(err(ConnectError::SslIsNotSupported)) Either::Right(err(ConnectError::SslIsNotSupported))
@ -428,8 +423,8 @@ mod connect_impl {
where where
Io1: AsyncRead + AsyncWrite + Unpin + 'static, Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static, Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>, T1: Service<Connect, Response = (Io1, Protocol), Error = ConnectError>,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>, T2: Service<Connect, Response = (Io2, Protocol), Error = ConnectError>,
{ {
pub(crate) tcp_pool: ConnectionPool<T1, Io1>, pub(crate) tcp_pool: ConnectionPool<T1, Io1>,
pub(crate) ssl_pool: ConnectionPool<T2, Io2>, pub(crate) ssl_pool: ConnectionPool<T2, Io2>,
@ -439,10 +434,8 @@ mod connect_impl {
where where
Io1: AsyncRead + AsyncWrite + Unpin + 'static, Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static, Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError> T1: Service<Connect, Response = (Io1, Protocol), Error = ConnectError> + 'static,
+ 'static, T2: Service<Connect, Response = (Io2, Protocol), Error = ConnectError> + 'static,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{ {
fn clone(&self) -> Self { fn clone(&self) -> Self {
InnerConnector { InnerConnector {
@ -452,16 +445,13 @@ mod connect_impl {
} }
} }
impl<T1, T2, Io1, Io2> Service for InnerConnector<T1, T2, Io1, Io2> impl<T1, T2, Io1, Io2> Service<Connect> for InnerConnector<T1, T2, Io1, Io2>
where where
Io1: AsyncRead + AsyncWrite + Unpin + 'static, Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static, Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError> T1: Service<Connect, Response = (Io1, Protocol), Error = ConnectError> + 'static,
+ 'static, T2: Service<Connect, Response = (Io2, Protocol), Error = ConnectError> + 'static,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{ {
type Request = Connect;
type Response = EitherConnection<Io1, Io2>; type Response = EitherConnection<Io1, Io2>;
type Error = ConnectError; type Error = ConnectError;
type Future = Either< type Future = Either<
@ -469,19 +459,19 @@ mod connect_impl {
InnerConnectorResponseB<T2, Io1, Io2>, InnerConnectorResponseB<T2, Io1, Io2>,
>; >;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.tcp_pool.poll_ready(cx) self.tcp_pool.poll_ready(cx)
} }
fn call(&mut self, req: Connect) -> Self::Future { fn call(&self, req: Connect) -> Self::Future {
match req.uri.scheme_str() { match req.uri.scheme_str() {
Some("https") | Some("wss") => Either::Right(InnerConnectorResponseB { Some("https") | Some("wss") => Either::Right(InnerConnectorResponseB {
fut: self.ssl_pool.call(req), fut: self.ssl_pool.call(req),
_t: PhantomData, _phantom: PhantomData,
}), }),
_ => Either::Left(InnerConnectorResponseA { _ => Either::Left(InnerConnectorResponseA {
fut: self.tcp_pool.call(req), fut: self.tcp_pool.call(req),
_t: PhantomData, _phantom: PhantomData,
}), }),
} }
} }
@ -491,18 +481,16 @@ mod connect_impl {
pub(crate) struct InnerConnectorResponseA<T, Io1, Io2> pub(crate) struct InnerConnectorResponseA<T, Io1, Io2>
where where
Io1: AsyncRead + AsyncWrite + Unpin + 'static, Io1: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError> T: Service<Connect, Response = (Io1, Protocol), Error = ConnectError> + 'static,
+ 'static,
{ {
#[pin] #[pin]
fut: <ConnectionPool<T, Io1> as Service>::Future, fut: <ConnectionPool<T, Io1> as Service<Connect>>::Future,
_t: PhantomData<Io2>, _phantom: PhantomData<Io2>,
} }
impl<T, Io1, Io2> Future for InnerConnectorResponseA<T, Io1, Io2> impl<T, Io1, Io2> Future for InnerConnectorResponseA<T, Io1, Io2>
where where
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError> T: Service<Connect, Response = (Io1, Protocol), Error = ConnectError> + 'static,
+ 'static,
Io1: AsyncRead + AsyncWrite + Unpin + 'static, Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static, Io2: AsyncRead + AsyncWrite + Unpin + 'static,
{ {
@ -520,18 +508,16 @@ mod connect_impl {
pub(crate) struct InnerConnectorResponseB<T, Io1, Io2> pub(crate) struct InnerConnectorResponseB<T, Io1, Io2>
where where
Io2: AsyncRead + AsyncWrite + Unpin + 'static, Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError> T: Service<Connect, Response = (Io2, Protocol), Error = ConnectError> + 'static,
+ 'static,
{ {
#[pin] #[pin]
fut: <ConnectionPool<T, Io2> as Service>::Future, fut: <ConnectionPool<T, Io2> as Service<Connect>>::Future,
_t: PhantomData<Io1>, _phantom: PhantomData<Io1>,
} }
impl<T, Io1, Io2> Future for InnerConnectorResponseB<T, Io1, Io2> impl<T, Io1, Io2> Future for InnerConnectorResponseB<T, Io1, Io2>
where where
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError> T: Service<Connect, Response = (Io2, Protocol), Error = ConnectError> + 'static,
+ 'static,
Io1: AsyncRead + AsyncWrite + Unpin + 'static, Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static, Io2: AsyncRead + AsyncWrite + Unpin + 'static,
{ {
@ -545,3 +531,82 @@ mod connect_impl {
} }
} }
} }
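All of the impls above were rewritten against the actix-service 2.0 trait shape: the request type is a generic parameter of `Service` rather than an associated `Request` type, and `poll_ready`/`call` take `&self`. A minimal sketch of a service in that style (the `Upper` service is illustrative only):

    use std::convert::Infallible;
    use std::task::{Context, Poll};

    use actix_service::Service;
    use futures_util::future::{ok, Ready};

    struct Upper;

    // The request type (`String`) is now a type parameter of the trait itself.
    impl Service<String> for Upper {
        type Response = String;
        type Error = Infallible;
        type Future = Ready<Result<String, Infallible>>;

        fn poll_ready(&self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
            Poll::Ready(Ok(()))
        }

        fn call(&self, req: String) -> Self::Future {
            ok(req.to_uppercase())
        }
    }

    #[actix_rt::main]
    async fn main() {
        let svc = Upper;
        assert_eq!(svc.call("ping".to_owned()).await.unwrap(), "PING");
    }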
#[cfg(not(feature = "trust-dns"))]
mod resolver {
use super::*;
pub(super) fn resolver() -> Resolver {
Resolver::Default
}
}
#[cfg(feature = "trust-dns")]
mod resolver {
use std::{cell::RefCell, net::SocketAddr};
use actix_tls::connect::Resolve;
use futures_core::future::LocalBoxFuture;
use trust_dns_resolver::{
config::{ResolverConfig, ResolverOpts},
system_conf::read_system_conf,
TokioAsyncResolver,
};
use super::*;
pub(super) fn resolver() -> Resolver {
// newtype to implement the Resolve trait for TokioAsyncResolver.
struct TrustDnsResolver(TokioAsyncResolver);
impl Resolve for TrustDnsResolver {
fn lookup<'a>(
&'a self,
host: &'a str,
port: u16,
) -> LocalBoxFuture<'a, Result<Vec<SocketAddr>, Box<dyn std::error::Error>>>
{
Box::pin(async move {
let res = self
.0
.lookup_ip(host)
.await?
.iter()
.map(|ip| SocketAddr::new(ip, port))
.collect();
Ok(res)
})
}
}
// The resolver is cached in a thread local so that each new client
// constructor can reuse the existing DNS resolver.
thread_local! {
static TRUST_DNS_RESOLVER: RefCell<Option<Resolver>> = RefCell::new(None);
}
// get from thread local or construct a new trust-dns resolver.
TRUST_DNS_RESOLVER.with(|local| {
let resolver = local.borrow().as_ref().map(Clone::clone);
match resolver {
Some(resolver) => resolver,
None => {
let (cfg, opts) = match read_system_conf() {
Ok((cfg, opts)) => (cfg, opts),
Err(e) => {
log::error!("TRust-DNS can not load system config: {}", e);
(ResolverConfig::default(), ResolverOpts::default())
}
};
let resolver = TokioAsyncResolver::tokio(cfg, opts).unwrap();
// box the trust-dns resolver and store it in the thread local.
let resolver = Resolver::new_custom(TrustDnsResolver(resolver));
*local.borrow_mut() = Some(resolver.clone());
resolver
}
}
})
}
}
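The resolver module above builds the trust-dns resolver once per thread and clones it out of a thread local on later calls. The same memoization pattern in isolation, with a stand-in `Handle` type instead of the crate's `Resolver`:

    use std::cell::RefCell;

    #[derive(Clone, Debug)]
    struct Handle(u32);

    fn expensive_build() -> Handle {
        // imagine reading system DNS configuration here
        Handle(42)
    }

    fn cached_handle() -> Handle {
        thread_local! {
            static CACHE: RefCell<Option<Handle>> = RefCell::new(None);
        }

        CACHE.with(|local| {
            // reuse the handle built earlier on this thread, if any
            if let Some(h) = local.borrow().as_ref() {
                return h.clone();
            }
            // otherwise build it once and cache a clone
            let h = expensive_build();
            *local.borrow_mut() = Some(h.clone());
            h
        })
    }

    fn main() {
        // both calls on this thread see the same cached handle
        assert_eq!(cached_handle().0, cached_handle().0);
    }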

View File

@ -1,10 +1,9 @@
use std::io; use std::io;
use actix_connect::resolver::ResolveError;
use derive_more::{Display, From}; use derive_more::{Display, From};
#[cfg(feature = "openssl")] #[cfg(feature = "openssl")]
use actix_connect::ssl::openssl::{HandshakeError, SslError}; use actix_tls::accept::openssl::SslError;
use crate::error::{Error, ParseError, ResponseError}; use crate::error::{Error, ParseError, ResponseError};
use crate::http::{Error as HttpError, StatusCode}; use crate::http::{Error as HttpError, StatusCode};
@ -21,14 +20,9 @@ pub enum ConnectError {
#[display(fmt = "{}", _0)] #[display(fmt = "{}", _0)]
SslError(SslError), SslError(SslError),
/// SSL Handshake error
#[cfg(feature = "openssl")]
#[display(fmt = "{}", _0)]
SslHandshakeError(String),
/// Failed to resolve the hostname /// Failed to resolve the hostname
#[display(fmt = "Failed resolving hostname: {}", _0)] #[display(fmt = "Failed resolving hostname: {}", _0)]
Resolver(ResolveError), Resolver(Box<dyn std::error::Error>),
/// No dns records /// No dns records
#[display(fmt = "No dns records found for the input")] #[display(fmt = "No dns records found for the input")]
@ -57,25 +51,18 @@ pub enum ConnectError {
impl std::error::Error for ConnectError {} impl std::error::Error for ConnectError {}
impl From<actix_connect::ConnectError> for ConnectError { impl From<actix_tls::connect::ConnectError> for ConnectError {
fn from(err: actix_connect::ConnectError) -> ConnectError { fn from(err: actix_tls::connect::ConnectError) -> ConnectError {
match err { match err {
actix_connect::ConnectError::Resolver(e) => ConnectError::Resolver(e), actix_tls::connect::ConnectError::Resolver(e) => ConnectError::Resolver(e),
actix_connect::ConnectError::NoRecords => ConnectError::NoRecords, actix_tls::connect::ConnectError::NoRecords => ConnectError::NoRecords,
actix_connect::ConnectError::InvalidInput => panic!(), actix_tls::connect::ConnectError::InvalidInput => panic!(),
actix_connect::ConnectError::Unresolved => ConnectError::Unresolved, actix_tls::connect::ConnectError::Unresolved => ConnectError::Unresolved,
actix_connect::ConnectError::Io(e) => ConnectError::Io(e), actix_tls::connect::ConnectError::Io(e) => ConnectError::Io(e),
} }
} }
} }
#[cfg(feature = "openssl")]
impl<T: std::fmt::Debug> From<HandshakeError<T>> for ConnectError {
fn from(err: HandshakeError<T>) -> ConnectError {
ConnectError::SslHandshakeError(format!("{:?}", err))
}
}
#[derive(Debug, Display, From)] #[derive(Debug, Display, From)]
pub enum InvalidUrl { pub enum InvalidUrl {
#[display(fmt = "Missing url scheme")] #[display(fmt = "Missing url scheme")]

View File

@ -1,10 +1,10 @@
use std::io::Write; use std::io::Write;
use std::pin::Pin; use std::pin::Pin;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use std::{io, mem, time}; use std::{io, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed}; use actix_codec::{AsyncRead, AsyncWrite, Framed, ReadBuf};
use bytes::buf::BufMutExt; use bytes::buf::BufMut;
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use futures_core::Stream; use futures_core::Stream;
use futures_util::future::poll_fn; use futures_util::future::poll_fn;
@ -45,14 +45,14 @@ where
Some(port) => write!(wrt, "{}:{}", host, port), Some(port) => write!(wrt, "{}:{}", host, port),
}; };
match wrt.get_mut().split().freeze().try_into() { match wrt.get_mut().split().freeze().try_into_value() {
Ok(value) => match head { Ok(value) => match head {
RequestHeadType::Owned(ref mut head) => { RequestHeadType::Owned(ref mut head) => {
head.headers.insert(HOST, value) head.headers.insert(HOST, value);
} }
RequestHeadType::Rc(_, ref mut extra_headers) => { RequestHeadType::Rc(_, ref mut extra_headers) => {
let headers = extra_headers.get_or_insert(HeaderMap::new()); let headers = extra_headers.get_or_insert(HeaderMap::new());
headers.insert(HOST, value) headers.insert(HOST, value);
} }
}, },
Err(e) => log::error!("Can not set HOST header {}", e), Err(e) => log::error!("Can not set HOST header {}", e),
@ -67,17 +67,17 @@ where
}; };
// create Framed and send request // create Framed and send request
let mut framed = Framed::new(io, h1::ClientCodec::default()); let mut framed_inner = Framed::new(io, h1::ClientCodec::default());
framed.send((head, body.size()).into()).await?; framed_inner.send((head, body.size()).into()).await?;
// send request body // send request body
match body.size() { match body.size() {
BodySize::None | BodySize::Empty | BodySize::Sized(0) => (), BodySize::None | BodySize::Empty | BodySize::Sized(0) => {}
_ => send_body(body, &mut framed).await?, _ => send_body(body, Pin::new(&mut framed_inner)).await?,
}; };
// read response and init read body // read response and init read body
let res = framed.into_future().await; let res = Pin::new(&mut framed_inner).into_future().await;
let (head, framed) = if let (Some(result), framed) = res { let (head, framed) = if let (Some(result), framed) = res {
let item = result.map_err(SendRequestError::from)?; let item = result.map_err(SendRequestError::from)?;
(item, framed) (item, framed)
@ -85,14 +85,14 @@ where
return Err(SendRequestError::from(ConnectError::Disconnected)); return Err(SendRequestError::from(ConnectError::Disconnected));
}; };
match framed.get_codec().message_type() { match framed.codec_ref().message_type() {
h1::MessageType::None => { h1::MessageType::None => {
let force_close = !framed.get_codec().keepalive(); let force_close = !framed.codec_ref().keepalive();
release_connection(framed, force_close); release_connection(framed, force_close);
Ok((head, Payload::None)) Ok((head, Payload::None))
} }
_ => { _ => {
let pl: PayloadStream = PlStream::new(framed).boxed_local(); let pl: PayloadStream = PlStream::new(framed_inner).boxed_local();
Ok((head, pl.into())) Ok((head, pl.into()))
} }
} }
@ -119,35 +119,36 @@ where
} }
/// send request body to the peer /// send request body to the peer
pub(crate) async fn send_body<I, B>( pub(crate) async fn send_body<T, B>(
body: B, body: B,
framed: &mut Framed<I, h1::ClientCodec>, mut framed: Pin<&mut Framed<T, h1::ClientCodec>>,
) -> Result<(), SendRequestError> ) -> Result<(), SendRequestError>
where where
I: ConnectionLifetime, T: ConnectionLifetime + Unpin,
B: MessageBody, B: MessageBody,
{ {
let mut eof = false;
pin_mut!(body); pin_mut!(body);
let mut eof = false;
while !eof { while !eof {
while !eof && !framed.is_write_buf_full() { while !eof && !framed.as_ref().is_write_buf_full() {
match poll_fn(|cx| body.as_mut().poll_next(cx)).await { match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
Some(result) => { Some(result) => {
framed.write(h1::Message::Chunk(Some(result?)))?; framed.as_mut().write(h1::Message::Chunk(Some(result?)))?;
} }
None => { None => {
eof = true; eof = true;
framed.write(h1::Message::Chunk(None))?; framed.as_mut().write(h1::Message::Chunk(None))?;
} }
} }
} }
if !framed.is_write_buf_empty() { if !framed.as_ref().is_write_buf_empty() {
poll_fn(|cx| match framed.flush(cx) { poll_fn(|cx| match framed.as_mut().flush(cx) {
Poll::Ready(Ok(_)) => Poll::Ready(Ok(())), Poll::Ready(Ok(_)) => Poll::Ready(Ok(())),
Poll::Ready(Err(err)) => Poll::Ready(Err(err)), Poll::Ready(Err(err)) => Poll::Ready(Err(err)),
Poll::Pending => { Poll::Pending => {
if !framed.is_write_buf_full() { if !framed.as_ref().is_write_buf_full() {
Poll::Ready(Ok(())) Poll::Ready(Ok(()))
} else { } else {
Poll::Pending Poll::Pending
@ -158,13 +159,14 @@ where
} }
} }
SinkExt::flush(framed).await?; SinkExt::flush(Pin::into_inner(framed)).await?;
Ok(()) Ok(())
} }
#[doc(hidden)] #[doc(hidden)]
/// HTTP client connection /// HTTP client connection
pub struct H1Connection<T> { pub struct H1Connection<T> {
/// T should be `Unpin`
io: Option<T>, io: Option<T>,
created: time::Instant, created: time::Instant,
pool: Option<Acquired<T>>, pool: Option<Acquired<T>>,
@ -175,7 +177,7 @@ where
T: AsyncRead + AsyncWrite + Unpin + 'static, T: AsyncRead + AsyncWrite + Unpin + 'static,
{ {
/// Close connection /// Close connection
fn close(&mut self) { fn close(mut self: Pin<&mut Self>) {
if let Some(mut pool) = self.pool.take() { if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() { if let Some(io) = self.io.take() {
pool.close(IoConnection::new( pool.close(IoConnection::new(
@ -188,7 +190,7 @@ where
} }
/// Release this connection to the connection pool /// Release this connection to the connection pool
fn release(&mut self) { fn release(mut self: Pin<&mut Self>) {
if let Some(mut pool) = self.pool.take() { if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() { if let Some(io) = self.io.take() {
pool.release(IoConnection::new( pool.release(IoConnection::new(
@ -202,18 +204,11 @@ where
} }
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncRead for H1Connection<T> { impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncRead for H1Connection<T> {
unsafe fn prepare_uninitialized_buffer(
&self,
buf: &mut [mem::MaybeUninit<u8>],
) -> bool {
self.io.as_ref().unwrap().prepare_uninitialized_buffer(buf)
}
fn poll_read( fn poll_read(
mut self: Pin<&mut Self>, mut self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
buf: &mut [u8], buf: &mut ReadBuf<'_>,
) -> Poll<io::Result<usize>> { ) -> Poll<io::Result<()>> {
Pin::new(&mut self.io.as_mut().unwrap()).poll_read(cx, buf) Pin::new(&mut self.io.as_mut().unwrap()).poll_read(cx, buf)
} }
} }
@ -242,14 +237,18 @@ impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncWrite for H1Connection<T>
} }
} }
#[pin_project::pin_project]
pub(crate) struct PlStream<Io> { pub(crate) struct PlStream<Io> {
#[pin]
framed: Option<Framed<Io, h1::ClientPayloadCodec>>, framed: Option<Framed<Io, h1::ClientPayloadCodec>>,
} }
impl<Io: ConnectionLifetime> PlStream<Io> { impl<Io: ConnectionLifetime> PlStream<Io> {
fn new(framed: Framed<Io, h1::ClientCodec>) -> Self { fn new(framed: Framed<Io, h1::ClientCodec>) -> Self {
let framed = framed.into_map_codec(|codec| codec.into_payload_codec());
PlStream { PlStream {
framed: Some(framed.map_codec(|codec| codec.into_payload_codec())), framed: Some(framed),
} }
} }
} }
@ -261,16 +260,16 @@ impl<Io: ConnectionLifetime> Stream for PlStream<Io> {
self: Pin<&mut Self>, self: Pin<&mut Self>,
cx: &mut Context<'_>, cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> { ) -> Poll<Option<Self::Item>> {
let this = self.get_mut(); let mut this = self.project();
match this.framed.as_mut().unwrap().next_item(cx)? { match this.framed.as_mut().as_pin_mut().unwrap().next_item(cx)? {
Poll::Pending => Poll::Pending, Poll::Pending => Poll::Pending,
Poll::Ready(Some(chunk)) => { Poll::Ready(Some(chunk)) => {
if let Some(chunk) = chunk { if let Some(chunk) = chunk {
Poll::Ready(Some(Ok(chunk))) Poll::Ready(Some(Ok(chunk)))
} else { } else {
let framed = this.framed.take().unwrap(); let framed = this.framed.as_mut().as_pin_mut().unwrap();
let force_close = !framed.get_codec().keepalive(); let force_close = !framed.codec_ref().keepalive();
release_connection(framed, force_close); release_connection(framed, force_close);
Poll::Ready(None) Poll::Ready(None)
} }
@ -280,14 +279,13 @@ impl<Io: ConnectionLifetime> Stream for PlStream<Io> {
} }
} }
fn release_connection<T, U>(framed: Framed<T, U>, force_close: bool) fn release_connection<T, U>(framed: Pin<&mut Framed<T, U>>, force_close: bool)
where where
T: ConnectionLifetime, T: ConnectionLifetime,
{ {
let mut parts = framed.into_parts(); if !force_close && framed.is_read_buf_empty() && framed.is_write_buf_empty() {
if !force_close && parts.read_buf.is_empty() && parts.write_buf.is_empty() { framed.io_pin().release()
parts.io.release()
} else { } else {
parts.io.close() framed.io_pin().close()
} }
} }
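`PlStream` above now pins its `Option<Framed<...>>` field and reaches the codec through `as_pin_mut()` instead of taking the framed object out by value. A small sketch of that `Option` projection pattern with `pin_project`:

    use std::pin::Pin;

    use pin_project::pin_project;

    #[pin_project]
    struct Holder<T> {
        #[pin]
        inner: Option<T>,
    }

    impl<T: std::fmt::Debug> Holder<T> {
        fn debug_inner(self: Pin<&mut Self>) -> Option<String> {
            let this = self.project();
            // `this.inner` is `Pin<&mut Option<T>>`; `as_pin_mut` projects it to
            // `Option<Pin<&mut T>>` without ever moving the pinned value
            this.inner.as_pin_mut().map(|v| format!("{:?}", *v))
        }
    }

    fn main() {
        let mut holder = Holder { inner: Some(5u32) };
        // `Holder<u32>` is `Unpin`, so `Pin::new` is enough for this demo
        assert_eq!(Pin::new(&mut holder).debug_inner(), Some("5".to_owned()));

        let mut empty: Holder<u32> = Holder { inner: None };
        assert_eq!(Pin::new(&mut empty).debug_inner(), None);
    }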

View File

@ -22,9 +22,10 @@ use super::config::ConnectorConfig;
use super::connection::{ConnectionType, IoConnection}; use super::connection::{ConnectionType, IoConnection};
use super::error::SendRequestError; use super::error::SendRequestError;
use super::pool::Acquired; use super::pool::Acquired;
use crate::client::connection::H2Connection;
pub(crate) async fn send_request<T, B>( pub(crate) async fn send_request<T, B>(
mut io: SendRequest<Bytes>, mut io: H2Connection,
head: RequestHeadType, head: RequestHeadType,
body: B, body: B,
created: time::Instant, created: time::Instant,
@ -37,10 +38,10 @@ where
trace!("Sending client request: {:?} {:?}", head, body.size()); trace!("Sending client request: {:?} {:?}", head, body.size());
let head_req = head.as_ref().method == Method::HEAD; let head_req = head.as_ref().method == Method::HEAD;
let length = body.size(); let length = body.size();
let eof = match length { let eof = matches!(
BodySize::None | BodySize::Empty | BodySize::Sized(0) => true, length,
_ => false, BodySize::None | BodySize::Empty | BodySize::Sized(0)
}; );
let mut req = Request::new(()); let mut req = Request::new(());
*req.uri_mut() = head.as_ref().uri.clone(); *req.uri_mut() = head.as_ref().uri.clone();
@ -89,7 +90,7 @@ where
CONNECTION | TRANSFER_ENCODING => continue, // http2 specific CONNECTION | TRANSFER_ENCODING => continue, // http2 specific
CONTENT_LENGTH if skip_len => continue, CONTENT_LENGTH if skip_len => continue,
// DATE => has_date = true, // DATE => has_date = true,
_ => (), _ => {}
} }
req.headers_mut().append(key, value.clone()); req.headers_mut().append(key, value.clone());
} }
@ -171,9 +172,9 @@ async fn send_body<B: MessageBody>(
} }
} }
// release SendRequest object /// release SendRequest object
fn release<T: AsyncRead + AsyncWrite + Unpin + 'static>( fn release<T: AsyncRead + AsyncWrite + Unpin + 'static>(
io: SendRequest<Bytes>, io: H2Connection,
pool: Option<Acquired<T>>, pool: Option<Acquired<T>>,
created: time::Instant, created: time::Instant,
close: bool, close: bool,

View File

@ -2,17 +2,19 @@ use std::cell::RefCell;
use std::collections::VecDeque; use std::collections::VecDeque;
use std::future::Future; use std::future::Future;
use std::pin::Pin; use std::pin::Pin;
use std::rc::{Rc, Weak}; use std::rc::Rc;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use std::time::{Duration, Instant}; use std::time::{Duration, Instant};
use actix_codec::{AsyncRead, AsyncWrite}; use actix_codec::{AsyncRead, AsyncWrite, ReadBuf};
use actix_rt::time::{delay_for, Delay}; use actix_rt::time::{sleep, Sleep};
use actix_service::Service; use actix_service::Service;
use actix_utils::{oneshot, task::LocalWaker}; use actix_utils::task::LocalWaker;
use ahash::AHashMap;
use bytes::Bytes; use bytes::Bytes;
use futures_util::future::{poll_fn, FutureExt, LocalBoxFuture}; use futures_channel::oneshot;
use fxhash::FxHashMap; use futures_core::future::LocalBoxFuture;
use futures_util::future::{poll_fn, FutureExt};
use h2::client::{Connection, SendRequest}; use h2::client::{Connection, SendRequest};
use http::uri::Authority; use http::uri::Authority;
use indexmap::IndexSet; use indexmap::IndexSet;
@ -24,6 +26,7 @@ use super::connection::{ConnectionType, IoConnection};
use super::error::ConnectError; use super::error::ConnectError;
use super::h2proto::handshake; use super::h2proto::handshake;
use super::Connect; use super::Connect;
use crate::client::connection::H2Connection;
#[derive(Clone, Copy, PartialEq)] #[derive(Clone, Copy, PartialEq)]
/// Protocol version /// Protocol version
@ -44,35 +47,31 @@ impl From<Authority> for Key {
} }
/// Connections pool /// Connections pool
pub(crate) struct ConnectionPool<T, Io: 'static>(Rc<RefCell<T>>, Rc<RefCell<Inner<Io>>>); pub(crate) struct ConnectionPool<T, Io: 'static>(Rc<T>, Rc<RefCell<Inner<Io>>>);
impl<T, Io> ConnectionPool<T, Io> impl<T, Io> ConnectionPool<T, Io>
where where
Io: AsyncRead + AsyncWrite + Unpin + 'static, Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError> T: Service<Connect, Response = (Io, Protocol), Error = ConnectError> + 'static,
+ 'static,
{ {
pub(crate) fn new(connector: T, config: ConnectorConfig) -> Self { pub(crate) fn new(connector: T, config: ConnectorConfig) -> Self {
let connector_rc = Rc::new(RefCell::new(connector)); let connector_rc = Rc::new(connector);
let inner_rc = Rc::new(RefCell::new(Inner { let inner_rc = Rc::new(RefCell::new(Inner {
config, config,
acquired: 0, acquired: 0,
waiters: Slab::new(), waiters: Slab::new(),
waiters_queue: IndexSet::new(), waiters_queue: IndexSet::new(),
available: FxHashMap::default(), available: AHashMap::default(),
waker: LocalWaker::new(), waker: LocalWaker::new(),
})); }));
// start support future // start support future
actix_rt::spawn(ConnectorPoolSupport { actix_rt::spawn(ConnectorPoolSupport {
connector: connector_rc.clone(), connector: Rc::clone(&connector_rc),
inner: Rc::downgrade(&inner_rc), inner: Rc::clone(&inner_rc),
}); });
ConnectionPool( ConnectionPool(connector_rc, inner_rc)
connector_rc,
inner_rc,
)
} }
} }
@ -85,23 +84,28 @@ where
} }
} }
impl<T, Io> Service for ConnectionPool<T, Io> impl<T, Io> Drop for ConnectionPool<T, Io> {
fn drop(&mut self) {
// wake up the ConnectorPoolSupport when dropping so it can exit properly.
self.1.borrow().waker.wake();
}
}
impl<T, Io> Service<Connect> for ConnectionPool<T, Io>
where where
Io: AsyncRead + AsyncWrite + Unpin + 'static, Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError> T: Service<Connect, Response = (Io, Protocol), Error = ConnectError> + 'static,
+ 'static,
{ {
type Request = Connect;
type Response = IoConnection<Io>; type Response = IoConnection<Io>;
type Error = ConnectError; type Error = ConnectError;
type Future = LocalBoxFuture<'static, Result<IoConnection<Io>, ConnectError>>; type Future = LocalBoxFuture<'static, Result<IoConnection<Io>, ConnectError>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.poll_ready(cx) self.0.poll_ready(cx)
} }
fn call(&mut self, req: Connect) -> Self::Future { fn call(&self, req: Connect) -> Self::Future {
let mut connector = self.0.clone(); let connector = self.0.clone();
let inner = self.1.clone(); let inner = self.1.clone();
let fut = async move { let fut = async move {
@ -115,11 +119,11 @@ where
match poll_fn(|cx| Poll::Ready(inner.borrow_mut().acquire(&key, cx))).await { match poll_fn(|cx| Poll::Ready(inner.borrow_mut().acquire(&key, cx))).await {
Acquire::Acquired(io, created) => { Acquire::Acquired(io, created) => {
// use existing connection // use existing connection
return Ok(IoConnection::new( Ok(IoConnection::new(
io, io,
created, created,
Some(Acquired(key, Some(inner))), Some(Acquired(key, Some(inner))),
)); ))
} }
Acquire::Available => { Acquire::Available => {
// open tcp connection // open tcp connection
@ -136,10 +140,9 @@ where
Some(guard.consume()), Some(guard.consume()),
)) ))
} else { } else {
let (snd, connection) = handshake(io, &config).await?; let (sender, connection) = handshake(io, &config).await?;
actix_rt::spawn(connection.map(|_| ()));
Ok(IoConnection::new( Ok(IoConnection::new(
ConnectionType::H2(snd), ConnectionType::H2(H2Connection::new(sender, connection)),
Instant::now(), Instant::now(),
Some(guard.consume()), Some(guard.consume()),
)) ))
@@ -255,7 +258,7 @@ struct AvailableConnection<Io> {
 pub(crate) struct Inner<Io> {
     config: ConnectorConfig,
     acquired: usize,
-    available: FxHashMap<Key, VecDeque<AvailableConnection<Io>>>,
+    available: AHashMap<Key, VecDeque<AvailableConnection<Io>>>,
     waiters: Slab<
         Option<(
             Connect,
@@ -323,21 +326,22 @@ where
                 {
                     if let Some(timeout) = self.config.disconnect_timeout {
                         if let ConnectionType::H1(io) = conn.io {
-                            actix_rt::spawn(CloseConnection::new(io, timeout))
+                            actix_rt::spawn(CloseConnection::new(io, timeout));
                         }
                     }
                 } else {
                     let mut io = conn.io;
                     let mut buf = [0; 2];
+                    let mut read_buf = ReadBuf::new(&mut buf);
                     if let ConnectionType::H1(ref mut s) = io {
-                        match Pin::new(s).poll_read(cx, &mut buf) {
-                            Poll::Pending => (),
-                            Poll::Ready(Ok(n)) if n > 0 => {
+                        match Pin::new(s).poll_read(cx, &mut read_buf) {
+                            Poll::Pending => {}
+                            Poll::Ready(Ok(())) if !read_buf.filled().is_empty() => {
                                 if let Some(timeout) = self.config.disconnect_timeout {
                                     if let ConnectionType::H1(io) = io {
                                         actix_rt::spawn(CloseConnection::new(
                                             io, timeout,
-                                        ))
+                                        ));
                                     }
                                 }
                                 continue;
@@ -369,7 +373,7 @@ where
                 self.acquired -= 1;
                 if let Some(timeout) = self.config.disconnect_timeout {
                     if let ConnectionType::H1(io) = io {
-                        actix_rt::spawn(CloseConnection::new(io, timeout))
+                        actix_rt::spawn(CloseConnection::new(io, timeout));
                     }
                 }
                 self.check_availability();
@@ -382,9 +386,11 @@ where
     }
 }
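Note: tokio 1.0 changed `AsyncRead::poll_read` to take a `ReadBuf` and return `Poll<io::Result<()>>`, with the byte count tracked by the buffer itself, which is what the hunk above adapts to. A minimal standalone sketch of probing a pooled connection that way (not this crate's code; names are illustrative, assuming tokio 1.x):

use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};

use tokio::io::{AsyncRead, ReadBuf};

/// Poll a stream once to see whether the peer sent data or closed the connection.
/// Resolves to Ok(true) when data arrived, Ok(false) on EOF; stays Pending while idle.
fn poll_probe<S: AsyncRead + Unpin>(
    stream: &mut S,
    cx: &mut Context<'_>,
) -> Poll<io::Result<bool>> {
    let mut buf = [0u8; 2];
    let mut read_buf = ReadBuf::new(&mut buf);

    match Pin::new(stream).poll_read(cx, &mut read_buf) {
        // nothing readable yet; the connection looks healthy and idle
        Poll::Pending => Poll::Pending,
        // `filled()` is empty on EOF, non-empty when the peer sent unexpected data
        Poll::Ready(Ok(())) => Poll::Ready(Ok(!read_buf.filled().is_empty())),
        Poll::Ready(Err(err)) => Poll::Ready(Err(err)),
    }
}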
+#[pin_project::pin_project]
 struct CloseConnection<T> {
     io: T,
-    timeout: Delay,
+    #[pin]
+    timeout: Sleep,
 }

 impl<T> CloseConnection<T>
@@ -394,7 +400,7 @@ where
     fn new(io: T, timeout: Duration) -> Self {
         CloseConnection {
             io,
-            timeout: delay_for(timeout),
+            timeout: sleep(timeout),
         }
     }
 }
@@ -406,11 +412,11 @@ where
     type Output = ();

     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
-        let this = self.get_mut();
+        let this = self.project();

-        match Pin::new(&mut this.timeout).poll(cx) {
+        match this.timeout.poll(cx) {
             Poll::Ready(_) => Poll::Ready(()),
-            Poll::Pending => match Pin::new(&mut this.io).poll_shutdown(cx) {
+            Poll::Pending => match Pin::new(this.io).poll_shutdown(cx) {
                 Poll::Ready(_) => Poll::Ready(()),
                 Poll::Pending => Poll::Pending,
             },
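Note: tokio 1.0's `Sleep` (the old `Delay`) is no longer `Unpin`, so it has to be pinned structurally, hence the `pin_project`/`#[pin]` attributes above, rather than polled through `Pin::new`. A minimal sketch of the same pattern under those assumptions (illustrative names, using the `pin-project` crate):

use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;

use pin_project::pin_project;
use tokio::time::{sleep, Sleep};

#[pin_project]
struct Timeout<F> {
    #[pin]
    inner: F,
    #[pin]
    delay: Sleep,
}

impl<F: Future> Future for Timeout<F> {
    type Output = Option<F::Output>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        // project to get `Pin<&mut F>` and `Pin<&mut Sleep>` without unsafe code
        let this = self.project();

        if let Poll::Ready(out) = this.inner.poll(cx) {
            return Poll::Ready(Some(out));
        }

        match this.delay.poll(cx) {
            Poll::Ready(()) => Poll::Ready(None), // timed out
            Poll::Pending => Poll::Pending,
        }
    }
}

fn timeout<F: Future>(inner: F, after: Duration) -> Timeout<F> {
    Timeout { inner, delay: sleep(after) }
}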
@@ -423,14 +429,14 @@ struct ConnectorPoolSupport<T, Io>
 where
     Io: AsyncRead + AsyncWrite + Unpin + 'static,
 {
-    connector: T,
-    inner: Weak<RefCell<Inner<Io>>>,
+    connector: Rc<T>,
+    inner: Rc<RefCell<Inner<Io>>>,
 }

 impl<T, Io> Future for ConnectorPoolSupport<T, Io>
 where
     Io: AsyncRead + AsyncWrite + Unpin + 'static,
-    T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>,
+    T: Service<Connect, Response = (Io, Protocol), Error = ConnectError>,
     T::Future: 'static,
 {
     type Output = ();
@@ -438,55 +444,57 @@ where
     fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
         let this = self.project();

-        if let Some(this_inner) = this.inner.upgrade() {
-            let mut inner = this_inner.as_ref().borrow_mut();
-            inner.waker.register(cx.waker());
-
-            // check waiters
-            loop {
-                let (key, token) = {
-                    if let Some((key, token)) = inner.waiters_queue.get_index(0) {
-                        (key.clone(), *token)
-                    } else {
-                        break;
-                    }
-                };
-                if inner.waiters.get(token).unwrap().is_none() {
-                    continue;
-                }
-
-                match inner.acquire(&key, cx) {
-                    Acquire::NotAvailable => break,
-                    Acquire::Acquired(io, created) => {
-                        let tx = inner.waiters.get_mut(token).unwrap().take().unwrap().1;
-                        if let Err(conn) = tx.send(Ok(IoConnection::new(
-                            io,
-                            created,
-                            Some(Acquired(key.clone(), Some(this_inner.clone()))),
-                        ))) {
-                            let (io, created) = conn.unwrap().into_inner();
-                            inner.release_conn(&key, io, created);
-                        }
-                    }
-                    Acquire::Available => {
-                        let (connect, tx) =
-                            inner.waiters.get_mut(token).unwrap().take().unwrap();
-                        OpenWaitingConnection::spawn(
-                            key.clone(),
-                            tx,
-                            this_inner.clone(),
-                            this.connector.call(connect),
-                            inner.config.clone(),
-                        );
-                    }
-                }
-                let _ = inner.waiters_queue.swap_remove_index(0);
-            }
-
-            Poll::Pending
-        } else {
-            Poll::Ready(())
-        }
+        if Rc::strong_count(this.inner) == 1 {
+            // If we are last copy of Inner<Io> it means the ConnectionPool is already gone
+            // and we are safe to exit.
+            return Poll::Ready(());
+        }
+
+        let mut inner = this.inner.borrow_mut();
+        inner.waker.register(cx.waker());
+
+        // check waiters
+        loop {
+            let (key, token) = {
+                if let Some((key, token)) = inner.waiters_queue.get_index(0) {
+                    (key.clone(), *token)
+                } else {
+                    break;
+                }
+            };
+            if inner.waiters.get(token).unwrap().is_none() {
+                continue;
+            }
+
+            match inner.acquire(&key, cx) {
+                Acquire::NotAvailable => break,
+                Acquire::Acquired(io, created) => {
+                    let tx = inner.waiters.get_mut(token).unwrap().take().unwrap().1;
+                    if let Err(conn) = tx.send(Ok(IoConnection::new(
+                        io,
+                        created,
+                        Some(Acquired(key.clone(), Some(this.inner.clone()))),
+                    ))) {
+                        let (io, created) = conn.unwrap().into_inner();
+                        inner.release_conn(&key, io, created);
+                    }
+                }
+                Acquire::Available => {
+                    let (connect, tx) =
+                        inner.waiters.get_mut(token).unwrap().take().unwrap();
+                    OpenWaitingConnection::spawn(
+                        key.clone(),
+                        tx,
+                        this.inner.clone(),
+                        this.connector.call(connect),
+                        inner.config.clone(),
+                    );
+                }
+            }
+            let _ = inner.waiters_queue.swap_remove_index(0);
+        }
+
+        Poll::Pending
     }
 }
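Note: the support future now detects pool shutdown by observing `Rc::strong_count` instead of upgrading a `Weak`, and the new `Drop` impl wakes it so that check runs promptly. A tiny standalone sketch of that ownership signal (illustrative, standard library only):

use std::cell::RefCell;
use std::rc::Rc;

struct Shared {
    counter: RefCell<u32>,
}

fn main() {
    let state = Rc::new(Shared { counter: RefCell::new(0) });
    let for_task = Rc::clone(&state);

    // drop the "pool" handle
    drop(state);

    // a background task holding `for_task` can now see every other owner is gone
    if Rc::strong_count(&for_task) == 1 {
        println!("last owner left, shutting down support task");
    }
}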
@@ -528,7 +536,7 @@ where
             rx: Some(rx),
             inner: Some(inner),
             config,
-        })
+        });
     }
 }
@@ -558,11 +566,10 @@ where
         if let Some(ref mut h2) = this.h2 {
             return match Pin::new(h2).poll(cx) {
-                Poll::Ready(Ok((snd, connection))) => {
-                    actix_rt::spawn(connection.map(|_| ()));
+                Poll::Ready(Ok((sender, connection))) => {
                     let rx = this.rx.take().unwrap();
                     let _ = rx.send(Ok(IoConnection::new(
-                        ConnectionType::H2(snd),
+                        ConnectionType::H2(H2Connection::new(sender, connection)),
                         Instant::now(),
                         Some(Acquired(this.key.clone(), this.inner.take())),
                     )));


@ -1,40 +0,0 @@
use std::cell::RefCell;
use std::rc::Rc;
use std::task::{Context, Poll};
use actix_service::Service;
#[doc(hidden)]
/// Service that allows to turn non-clone service to a service with `Clone` impl
///
/// # Panics
/// CloneableService might panic with some creative use of thread local storage.
/// See https://github.com/actix/actix-web/issues/1295 for example
pub(crate) struct CloneableService<T: Service>(Rc<RefCell<T>>);
impl<T: Service> CloneableService<T> {
pub(crate) fn new(service: T) -> Self {
Self(Rc::new(RefCell::new(service)))
}
}
impl<T: Service> Clone for CloneableService<T> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl<T: Service> Service for CloneableService<T> {
type Request = T::Request;
type Response = T::Response;
type Error = T::Error;
type Future = T::Future;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.borrow_mut().poll_ready(cx)
}
fn call(&mut self, req: T::Request) -> Self::Future {
self.0.borrow_mut().call(req)
}
}


@ -4,12 +4,12 @@ use std::rc::Rc;
use std::time::Duration; use std::time::Duration;
use std::{fmt, net}; use std::{fmt, net};
use actix_rt::time::{delay_for, delay_until, Delay, Instant}; use actix_rt::time::{sleep, sleep_until, Instant, Sleep};
use bytes::BytesMut; use bytes::BytesMut;
use futures_util::{future, FutureExt}; use futures_util::{future, FutureExt};
use time::OffsetDateTime; use time::OffsetDateTime;
// "Sun, 06 Nov 1994 08:49:37 GMT".len() /// "Sun, 06 Nov 1994 08:49:37 GMT".len()
const DATE_VALUE_LENGTH: usize = 29; const DATE_VALUE_LENGTH: usize = 29;
#[derive(Debug, PartialEq, Clone, Copy)] #[derive(Debug, PartialEq, Clone, Copy)]
@ -17,7 +17,7 @@ const DATE_VALUE_LENGTH: usize = 29;
pub enum KeepAlive { pub enum KeepAlive {
/// Keep alive in seconds /// Keep alive in seconds
Timeout(usize), Timeout(usize),
/// Relay on OS to shutdown tcp connection /// Rely on OS to shutdown tcp connection
Os, Os,
/// Disabled /// Disabled
Disabled, Disabled,
@ -121,10 +121,10 @@ impl ServiceConfig {
#[inline] #[inline]
/// Client timeout for first request. /// Client timeout for first request.
pub fn client_timer(&self) -> Option<Delay> { pub fn client_timer(&self) -> Option<Sleep> {
let delay_time = self.0.client_timeout; let delay_time = self.0.client_timeout;
if delay_time != 0 { if delay_time != 0 {
Some(delay_until( Some(sleep_until(
self.0.timer.now() + Duration::from_millis(delay_time), self.0.timer.now() + Duration::from_millis(delay_time),
)) ))
} else { } else {
@ -154,9 +154,9 @@ impl ServiceConfig {
#[inline] #[inline]
/// Return keep-alive timer delay is configured. /// Return keep-alive timer delay is configured.
pub fn keep_alive_timer(&self) -> Option<Delay> { pub fn keep_alive_timer(&self) -> Option<Sleep> {
if let Some(ka) = self.0.keep_alive { if let Some(ka) = self.0.keep_alive {
Some(delay_until(self.0.timer.now() + ka)) Some(sleep_until(self.0.timer.now() + ka))
} else { } else {
None None
} }
@ -209,6 +209,7 @@ impl Date {
date.update(); date.update();
date date
} }
fn update(&mut self) { fn update(&mut self) {
self.pos = 0; self.pos = 0;
write!( write!(
@ -265,7 +266,7 @@ impl DateService {
// periodic date update // periodic date update
let s = self.clone(); let s = self.clone();
actix_rt::spawn(delay_for(Duration::from_millis(500)).then(move |_| { actix_rt::spawn(sleep(Duration::from_millis(500)).then(move |_| {
s.0.reset(); s.0.reset();
future::ready(()) future::ready(())
})); }));
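Note: this file tracks the tokio 1.0 rename of `delay_for`/`delay_until` (and `Delay`) to `sleep`/`sleep_until` (and `Sleep`). A minimal usage sketch, assuming tokio 1.x with the `rt`, `time`, and `macros` features enabled:

use std::time::Duration;
use tokio::time::{sleep, sleep_until, Instant};

#[tokio::main(flavor = "current_thread")]
async fn main() {
    // relative delay
    sleep(Duration::from_millis(500)).await;

    // absolute deadline
    let deadline = Instant::now() + Duration::from_millis(500);
    sleep_until(deadline).await;

    println!("both timers fired");
}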


@@ -3,14 +3,14 @@ use std::io::{self, Write};
 use std::pin::Pin;
 use std::task::{Context, Poll};

-use actix_threadpool::{run, CpuFuture};
+use actix_rt::task::{spawn_blocking, JoinHandle};
 use brotli2::write::BrotliDecoder;
 use bytes::Bytes;
 use flate2::write::{GzDecoder, ZlibDecoder};
 use futures_core::{ready, Stream};

 use super::Writer;
-use crate::error::PayloadError;
+use crate::error::{BlockingError, PayloadError};
 use crate::http::header::{ContentEncoding, HeaderMap, CONTENT_ENCODING};

 const INPLACE: usize = 2049;
@@ -19,7 +19,7 @@ pub struct Decoder<S> {
     decoder: Option<ContentDecoder>,
     stream: S,
     eof: bool,
-    fut: Option<CpuFuture<(Option<Bytes>, ContentDecoder), io::Error>>,
+    fut: Option<JoinHandle<Result<(Option<Bytes>, ContentDecoder), io::Error>>>,
 }

 impl<S> Decoder<S>
@@ -79,10 +79,8 @@ where
     ) -> Poll<Option<Self::Item>> {
         loop {
             if let Some(ref mut fut) = self.fut {
-                let (chunk, decoder) = match ready!(Pin::new(fut).poll(cx)) {
-                    Ok(item) => item,
-                    Err(e) => return Poll::Ready(Some(Err(e.into()))),
-                };
+                let (chunk, decoder) =
+                    ready!(Pin::new(fut).poll(cx)).map_err(|_| BlockingError)??;
                 self.decoder = Some(decoder);
                 self.fut.take();
                 if let Some(chunk) = chunk {
@@ -105,7 +103,7 @@ where
                     return Poll::Ready(Some(Ok(chunk)));
                 }
             } else {
-                self.fut = Some(run(move || {
+                self.fut = Some(spawn_blocking(move || {
                     let chunk = decoder.feed_data(chunk)?;
                     Ok((chunk, decoder))
                 }));
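Note: `actix_threadpool::run` is replaced here by `actix_rt::task::spawn_blocking` (a re-export of tokio's), whose `JoinHandle` resolves to `Result<T, JoinError>`; a join error (panicked or cancelled blocking task) is what gets mapped to `BlockingError` above. A minimal sketch using tokio directly, with an illustrative workload standing in for the decode step:

use tokio::task::spawn_blocking;

#[tokio::main]
async fn main() {
    // run CPU-heavy work off the async worker threads
    let handle = spawn_blocking(|| {
        // pretend this is an expensive decompression step
        (0u64..1_000_000).sum::<u64>()
    });

    // JoinHandle yields Result<T, JoinError>; a JoinError means the
    // blocking task panicked or was cancelled
    match handle.await {
        Ok(sum) => println!("decoded: {}", sum),
        Err(err) => eprintln!("blocking task failed: {}", err),
    }
}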


@ -4,7 +4,7 @@ use std::io::{self, Write};
use std::pin::Pin; use std::pin::Pin;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use actix_threadpool::{run, CpuFuture}; use actix_rt::task::{spawn_blocking, JoinHandle};
use brotli2::write::BrotliEncoder; use brotli2::write::BrotliEncoder;
use bytes::Bytes; use bytes::Bytes;
use flate2::write::{GzEncoder, ZlibEncoder}; use flate2::write::{GzEncoder, ZlibEncoder};
@ -17,6 +17,7 @@ use crate::http::{HeaderValue, StatusCode};
use crate::{Error, ResponseHead}; use crate::{Error, ResponseHead};
use super::Writer; use super::Writer;
use crate::error::BlockingError;
const INPLACE: usize = 1024; const INPLACE: usize = 1024;
@ -26,7 +27,7 @@ pub struct Encoder<B> {
#[pin] #[pin]
body: EncoderBody<B>, body: EncoderBody<B>,
encoder: Option<ContentEncoder>, encoder: Option<ContentEncoder>,
fut: Option<CpuFuture<ContentEncoder, io::Error>>, fut: Option<JoinHandle<Result<ContentEncoder, io::Error>>>,
} }
impl<B: MessageBody> Encoder<B> { impl<B: MessageBody> Encoder<B> {
@ -135,10 +136,8 @@ impl<B: MessageBody> MessageBody for Encoder<B> {
} }
if let Some(ref mut fut) = this.fut { if let Some(ref mut fut) = this.fut {
let mut encoder = match ready!(Pin::new(fut).poll(cx)) { let mut encoder =
Ok(item) => item, ready!(Pin::new(fut).poll(cx)).map_err(|_| BlockingError)??;
Err(e) => return Poll::Ready(Some(Err(e.into()))),
};
let chunk = encoder.take(); let chunk = encoder.take();
*this.encoder = Some(encoder); *this.encoder = Some(encoder);
this.fut.take(); this.fut.take();
@ -160,7 +159,7 @@ impl<B: MessageBody> MessageBody for Encoder<B> {
return Poll::Ready(Some(Ok(chunk))); return Poll::Ready(Some(Ok(chunk)));
} }
} else { } else {
*this.fut = Some(run(move || { *this.fut = Some(spawn_blocking(move || {
encoder.write(&chunk)?; encoder.write(&chunk)?;
Ok(encoder) Ok(encoder)
})); }));


@ -1,4 +1,5 @@
//! Error and Result module //! Error and Result module
use std::cell::RefCell; use std::cell::RefCell;
use std::io::Write; use std::io::Write;
use std::str::Utf8Error; use std::str::Utf8Error;
@ -6,8 +7,7 @@ use std::string::FromUtf8Error;
use std::{fmt, io, result}; use std::{fmt, io, result};
use actix_codec::{Decoder, Encoder}; use actix_codec::{Decoder, Encoder};
pub use actix_threadpool::BlockingError; use actix_utils::dispatcher::DispatcherError as FramedDispatcherError;
use actix_utils::framed::DispatcherError as FramedDispatcherError;
use actix_utils::timeout::TimeoutError; use actix_utils::timeout::TimeoutError;
use bytes::BytesMut; use bytes::BytesMut;
use derive_more::{Display, From}; use derive_more::{Display, From};
@ -18,13 +18,12 @@ use serde::de::value::Error as DeError;
use serde_json::error::Error as JsonError; use serde_json::error::Error as JsonError;
use serde_urlencoded::ser::Error as FormError; use serde_urlencoded::ser::Error as FormError;
// re-export for convenience
use crate::body::Body; use crate::body::Body;
pub use crate::cookie::ParseError as CookieParseError; pub use crate::cookie::ParseError as CookieParseError;
use crate::helpers::Writer; use crate::helpers::Writer;
use crate::response::{Response, ResponseBuilder}; use crate::response::{Response, ResponseBuilder};
/// A specialized [`Result`](https://doc.rust-lang.org/std/result/enum.Result.html) /// A specialized [`std::result::Result`]
/// for actix web operations /// for actix web operations
/// ///
/// This typedef is generally used to avoid writing out /// This typedef is generally used to avoid writing out
@ -99,10 +98,6 @@ impl fmt::Debug for Error {
} }
impl std::error::Error for Error { impl std::error::Error for Error {
fn cause(&self) -> Option<&dyn std::error::Error> {
None
}
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> { fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
None None
} }
@ -152,7 +147,10 @@ impl From<ResponseBuilder> for Error {
} }
} }
/// Return `GATEWAY_TIMEOUT` for `TimeoutError` /// Inspects the underlying enum and returns an appropriate status code.
///
/// If the variant is [`TimeoutError::Service`], the error code of the service is returned.
/// Otherwise, [`StatusCode::GATEWAY_TIMEOUT`] is returned.
impl<E: ResponseError> ResponseError for TimeoutError<E> { impl<E: ResponseError> ResponseError for TimeoutError<E> {
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
match self { match self {
@ -166,48 +164,44 @@ impl<E: ResponseError> ResponseError for TimeoutError<E> {
#[display(fmt = "UnknownError")] #[display(fmt = "UnknownError")]
struct UnitError; struct UnitError;
/// `InternalServerError` for `UnitError` /// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`UnitError`].
impl ResponseError for UnitError {} impl ResponseError for UnitError {}
/// `InternalServerError` for `JsonError` /// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`JsonError`].
impl ResponseError for JsonError {} impl ResponseError for JsonError {}
/// `InternalServerError` for `FormError` /// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`FormError`].
impl ResponseError for FormError {} impl ResponseError for FormError {}
#[cfg(feature = "openssl")] #[cfg(feature = "openssl")]
/// `InternalServerError` for `openssl::ssl::Error` /// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`actix_tls::accept::openssl::SslError`].
impl ResponseError for actix_connect::ssl::openssl::SslError {} impl ResponseError for actix_tls::accept::openssl::SslError {}
#[cfg(feature = "openssl")] /// Returns [`StatusCode::BAD_REQUEST`] for [`DeError`].
/// `InternalServerError` for `openssl::ssl::HandshakeError`
impl<T: std::fmt::Debug> ResponseError for actix_tls::openssl::HandshakeError<T> {}
/// Return `BAD_REQUEST` for `de::value::Error`
impl ResponseError for DeError { impl ResponseError for DeError {
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST StatusCode::BAD_REQUEST
} }
} }
/// `InternalServerError` for `Canceled` /// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`Canceled`].
impl ResponseError for Canceled {} impl ResponseError for Canceled {}
/// `InternalServerError` for `BlockingError` /// Returns [`StatusCode::BAD_REQUEST`] for [`Utf8Error`].
impl<E: fmt::Debug> ResponseError for BlockingError<E> {}
/// Return `BAD_REQUEST` for `Utf8Error`
impl ResponseError for Utf8Error { impl ResponseError for Utf8Error {
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST StatusCode::BAD_REQUEST
} }
} }
/// Return `InternalServerError` for `HttpError`, /// Returns [`StatusCode::INTERNAL_SERVER_ERROR`] for [`HttpError`].
/// Response generation can return `HttpError`, so it is internal error
impl ResponseError for HttpError {} impl ResponseError for HttpError {}
/// Return `InternalServerError` for `io::Error` /// Inspects the underlying [`io::ErrorKind`] and returns an appropriate status code.
///
/// If the error is [`io::ErrorKind::NotFound`], [`StatusCode::NOT_FOUND`] is returned. If the
/// error is [`io::ErrorKind::PermissionDenied`], [`StatusCode::FORBIDDEN`] is returned. Otherwise,
/// [`StatusCode::INTERNAL_SERVER_ERROR`] is returned.
impl ResponseError for io::Error { impl ResponseError for io::Error {
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
match self.kind() { match self.kind() {
@ -218,7 +212,7 @@ impl ResponseError for io::Error {
} }
} }
/// `BadRequest` for `InvalidHeaderValue` /// Returns [`StatusCode::BAD_REQUEST`] for [`header::InvalidHeaderValue`].
impl ResponseError for header::InvalidHeaderValue { impl ResponseError for header::InvalidHeaderValue {
fn status_code(&self) -> StatusCode { fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST StatusCode::BAD_REQUEST
@ -307,33 +301,60 @@ impl From<httparse::Error> for ParseError {
} }
} }
/// A set of errors that can occur running blocking tasks in thread pool.
#[derive(Debug, Display)]
#[display(fmt = "Blocking thread pool is gone")]
pub struct BlockingError;
impl std::error::Error for BlockingError {}
/// `InternalServerError` for `BlockingError`
impl ResponseError for BlockingError {}
#[derive(Display, Debug)] #[derive(Display, Debug)]
/// A set of errors that can occur during payload parsing /// A set of errors that can occur during payload parsing
pub enum PayloadError { pub enum PayloadError {
/// A payload reached EOF, but is not complete. /// A payload reached EOF, but is not complete.
#[display( #[display(
fmt = "A payload reached EOF, but is not complete. With error: {:?}", fmt = "A payload reached EOF, but is not complete. Inner error: {:?}",
_0 _0
)] )]
Incomplete(Option<io::Error>), Incomplete(Option<io::Error>),
/// Content encoding stream corruption
/// Content encoding stream corruption.
#[display(fmt = "Can not decode content-encoding.")] #[display(fmt = "Can not decode content-encoding.")]
EncodingCorrupted, EncodingCorrupted,
/// A payload reached size limit.
#[display(fmt = "A payload reached size limit.")] /// Payload reached size limit.
#[display(fmt = "Payload reached size limit.")]
Overflow, Overflow,
/// A payload length is unknown.
#[display(fmt = "A payload length is unknown.")] /// Payload length is unknown.
#[display(fmt = "Payload length is unknown.")]
UnknownLength, UnknownLength,
/// Http2 payload error
/// HTTP/2 payload error.
#[display(fmt = "{}", _0)] #[display(fmt = "{}", _0)]
Http2Payload(h2::Error), Http2Payload(h2::Error),
/// Io error
/// Generic I/O error.
#[display(fmt = "{}", _0)] #[display(fmt = "{}", _0)]
Io(io::Error), Io(io::Error),
} }
-impl std::error::Error for PayloadError {}
+impl std::error::Error for PayloadError {
+    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
+        match self {
+            PayloadError::Incomplete(None) => None,
+            PayloadError::Incomplete(Some(err)) => Some(err as &dyn std::error::Error),
+            PayloadError::EncodingCorrupted => None,
+            PayloadError::Overflow => None,
+            PayloadError::UnknownLength => None,
+            PayloadError::Http2Payload(err) => Some(err as &dyn std::error::Error),
+            PayloadError::Io(err) => Some(err as &dyn std::error::Error),
+        }
+    }
+}
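Note: with `source()` implemented, `PayloadError` participates in the standard error-chain protocol and callers can report underlying causes generically. A small sketch of walking such a chain (standard library only, illustrative):

use std::error::Error;
use std::io;

/// Print an error and every underlying cause reported through `source()`.
fn report(err: &(dyn Error + 'static)) {
    eprintln!("error: {}", err);
    let mut cause = err.source();
    while let Some(err) = cause {
        eprintln!("  caused by: {}", err);
        cause = err.source();
    }
}

fn main() {
    let io_err = io::Error::new(io::ErrorKind::Other, "connection reset");
    report(&io_err);
}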
impl From<h2::Error> for PayloadError { impl From<h2::Error> for PayloadError {
fn from(err: h2::Error) -> Self { fn from(err: h2::Error) -> Self {
@ -353,15 +374,12 @@ impl From<io::Error> for PayloadError {
} }
} }
-impl From<BlockingError<io::Error>> for PayloadError {
-    fn from(err: BlockingError<io::Error>) -> Self {
-        match err {
-            BlockingError::Error(e) => PayloadError::Io(e),
-            BlockingError::Canceled => PayloadError::Io(io::Error::new(
-                io::ErrorKind::Other,
-                "Operation is canceled",
-            )),
-        }
+impl From<BlockingError> for PayloadError {
+    fn from(_: BlockingError) -> Self {
+        PayloadError::Io(io::Error::new(
+            io::ErrorKind::Other,
+            "Operation is canceled",
+        ))
     }
 }
@ -452,10 +470,10 @@ impl ResponseError for ContentTypeError {
} }
} }
impl<E, U: Encoder + Decoder> ResponseError for FramedDispatcherError<E, U> impl<E, U: Encoder<I> + Decoder, I> ResponseError for FramedDispatcherError<E, U, I>
where where
E: fmt::Debug + fmt::Display, E: fmt::Debug + fmt::Display,
<U as Encoder>::Error: fmt::Debug, <U as Encoder<I>>::Error: fmt::Debug,
<U as Decoder>::Error: fmt::Debug, <U as Decoder>::Error: fmt::Debug,
{ {
} }
@ -950,21 +968,10 @@ where
InternalError::new(err, StatusCode::NETWORK_AUTHENTICATION_REQUIRED).into() InternalError::new(err, StatusCode::NETWORK_AUTHENTICATION_REQUIRED).into()
} }
#[cfg(feature = "actors")]
/// `InternalServerError` for `actix::MailboxError`
/// This is supported on feature=`actors` only
impl ResponseError for actix::MailboxError {}
#[cfg(feature = "actors")]
/// `InternalServerError` for `actix::ResolverError`
/// This is supported on feature=`actors` only
impl ResponseError for actix::actors::resolver::ResolverError {}
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
use http::{Error as HttpError, StatusCode}; use http::{Error as HttpError, StatusCode};
use httparse;
use std::io; use std::io;
#[test] #[test]
@ -1018,22 +1025,22 @@ mod tests {
fn test_payload_error() { fn test_payload_error() {
let err: PayloadError = let err: PayloadError =
io::Error::new(io::ErrorKind::Other, "ParseError").into(); io::Error::new(io::ErrorKind::Other, "ParseError").into();
assert!(format!("{}", err).contains("ParseError")); assert!(err.to_string().contains("ParseError"));
let err = PayloadError::Incomplete(None); let err = PayloadError::Incomplete(None);
assert_eq!( assert_eq!(
format!("{}", err), err.to_string(),
"A payload reached EOF, but is not complete. With error: None" "A payload reached EOF, but is not complete. Inner error: None"
); );
} }
macro_rules! from { macro_rules! from {
($from:expr => $error:pat) => { ($from:expr => $error:pat) => {
match ParseError::from($from) { match ParseError::from($from) {
e @ $error => { err @ $error => {
assert!(format!("{}", e).len() >= 5); assert!(err.to_string().len() >= 5);
} }
e => unreachable!("{:?}", e), err => unreachable!("{:?}", err),
} }
}; };
} }
@ -1076,7 +1083,7 @@ mod tests {
let err = PayloadError::Overflow; let err = PayloadError::Overflow;
let resp_err: &dyn ResponseError = &err; let resp_err: &dyn ResponseError = &err;
let err = resp_err.downcast_ref::<PayloadError>().unwrap(); let err = resp_err.downcast_ref::<PayloadError>().unwrap();
assert_eq!(err.to_string(), "A payload reached size limit."); assert_eq!(err.to_string(), "Payload reached size limit.");
let not_err = resp_err.downcast_ref::<ContentTypeError>(); let not_err = resp_err.downcast_ref::<ContentTypeError>();
assert!(not_err.is_none()); assert!(not_err.is_none());
} }


@ -1,66 +1,133 @@
use std::any::{Any, TypeId}; use std::{
use std::fmt; any::{Any, TypeId},
fmt, mem,
};
use fxhash::FxHashMap; use ahash::AHashMap;
/// A type map for request extensions.
///
/// All entries into this map must be owned types (or static references).
#[derive(Default)] #[derive(Default)]
/// A type map of request extensions.
pub struct Extensions { pub struct Extensions {
/// Use FxHasher with a std HashMap with for faster /// Use FxHasher with a std HashMap with for faster
/// lookups on the small `TypeId` (u64 equivalent) keys. /// lookups on the small `TypeId` (u64 equivalent) keys.
map: FxHashMap<TypeId, Box<dyn Any>>, map: AHashMap<TypeId, Box<dyn Any>>,
} }
impl Extensions { impl Extensions {
/// Create an empty `Extensions`. /// Creates an empty `Extensions`.
#[inline] #[inline]
pub fn new() -> Extensions { pub fn new() -> Extensions {
Extensions { Extensions {
map: FxHashMap::default(), map: AHashMap::default(),
} }
} }
/// Insert a type into this `Extensions`. /// Insert an item into the map.
/// ///
/// If a extension of this type already existed, it will /// If an item of this type was already stored, it will be replaced and returned.
/// be returned. ///
pub fn insert<T: 'static>(&mut self, val: T) { /// ```
self.map.insert(TypeId::of::<T>(), Box::new(val)); /// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// assert_eq!(map.insert(""), None);
/// assert_eq!(map.insert(1u32), None);
/// assert_eq!(map.insert(2u32), Some(1u32));
/// assert_eq!(*map.get::<u32>().unwrap(), 2u32);
/// ```
pub fn insert<T: 'static>(&mut self, val: T) -> Option<T> {
self.map
.insert(TypeId::of::<T>(), Box::new(val))
.and_then(downcast_owned)
} }
/// Check if container contains entry /// Check if map contains an item of a given type.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// assert!(!map.contains::<u32>());
///
/// assert_eq!(map.insert(1u32), None);
/// assert!(map.contains::<u32>());
/// ```
pub fn contains<T: 'static>(&self) -> bool { pub fn contains<T: 'static>(&self) -> bool {
self.map.contains_key(&TypeId::of::<T>()) self.map.contains_key(&TypeId::of::<T>())
} }
/// Get a reference to a type previously inserted on this `Extensions`. /// Get a reference to an item of a given type.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// map.insert(1u32);
/// assert_eq!(map.get::<u32>(), Some(&1u32));
/// ```
pub fn get<T: 'static>(&self) -> Option<&T> { pub fn get<T: 'static>(&self) -> Option<&T> {
self.map self.map
.get(&TypeId::of::<T>()) .get(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast_ref()) .and_then(|boxed| boxed.downcast_ref())
} }
/// Get a mutable reference to a type previously inserted on this `Extensions`. /// Get a mutable reference to an item of a given type.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
/// map.insert(1u32);
/// assert_eq!(map.get_mut::<u32>(), Some(&mut 1u32));
/// ```
pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> { pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
self.map self.map
.get_mut(&TypeId::of::<T>()) .get_mut(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast_mut()) .and_then(|boxed| boxed.downcast_mut())
} }
/// Remove a type from this `Extensions`. /// Remove an item from the map of a given type.
/// ///
/// If a extension of this type existed, it will be returned. /// If an item of this type was already stored, it will be returned.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
///
/// map.insert(1u32);
/// assert_eq!(map.get::<u32>(), Some(&1u32));
///
/// assert_eq!(map.remove::<u32>(), Some(1u32));
/// assert!(!map.contains::<u32>());
/// ```
pub fn remove<T: 'static>(&mut self) -> Option<T> { pub fn remove<T: 'static>(&mut self) -> Option<T> {
self.map self.map.remove(&TypeId::of::<T>()).and_then(downcast_owned)
.remove(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast().ok().map(|boxed| *boxed))
} }
/// Clear the `Extensions` of all inserted extensions. /// Clear the `Extensions` of all inserted extensions.
///
/// ```
/// # use actix_http::Extensions;
/// let mut map = Extensions::new();
///
/// map.insert(1u32);
/// assert!(map.contains::<u32>());
///
/// map.clear();
/// assert!(!map.contains::<u32>());
/// ```
#[inline] #[inline]
pub fn clear(&mut self) { pub fn clear(&mut self) {
self.map.clear(); self.map.clear();
} }
/// Extends self with the items from another `Extensions`.
pub fn extend(&mut self, other: Extensions) {
self.map.extend(other.map);
}
/// Sets (or overrides) items from `other` into this map.
pub(crate) fn drain_from(&mut self, other: &mut Self) {
self.map.extend(mem::take(&mut other.map));
}
} }
impl fmt::Debug for Extensions { impl fmt::Debug for Extensions {
@ -69,6 +136,10 @@ impl fmt::Debug for Extensions {
} }
} }
fn downcast_owned<T: 'static>(boxed: Box<dyn Any>) -> Option<T> {
boxed.downcast().ok().map(|boxed| *boxed)
}
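Note: `downcast_owned` is the usual way to move a value back out of a `Box<dyn Any>`. A compact standalone sketch of the type-map idea it supports (standard library only; not this crate's `Extensions`, names are illustrative):

use std::any::{Any, TypeId};
use std::collections::HashMap;

#[derive(Default)]
struct TypeMap {
    map: HashMap<TypeId, Box<dyn Any>>,
}

impl TypeMap {
    /// Insert a value, returning any previously stored value of the same type.
    fn insert<T: 'static>(&mut self, val: T) -> Option<T> {
        self.map
            .insert(TypeId::of::<T>(), Box::new(val))
            .and_then(downcast_owned)
    }

    fn remove<T: 'static>(&mut self) -> Option<T> {
        self.map.remove(&TypeId::of::<T>()).and_then(downcast_owned)
    }
}

fn downcast_owned<T: 'static>(boxed: Box<dyn Any>) -> Option<T> {
    boxed.downcast().ok().map(|boxed| *boxed)
}

fn main() {
    let mut map = TypeMap::default();
    assert_eq!(map.insert(1u32), None);
    assert_eq!(map.insert(2u32), Some(1u32));
    assert_eq!(map.remove::<u32>(), Some(2u32));
}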
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use super::*; use super::*;
@ -178,4 +249,57 @@ mod tests {
assert_eq!(extensions.get::<bool>(), None); assert_eq!(extensions.get::<bool>(), None);
assert_eq!(extensions.get(), Some(&MyType(10))); assert_eq!(extensions.get(), Some(&MyType(10)));
} }
#[test]
fn test_extend() {
#[derive(Debug, PartialEq)]
struct MyType(i32);
let mut extensions = Extensions::new();
extensions.insert(5i32);
extensions.insert(MyType(10));
let mut other = Extensions::new();
other.insert(15i32);
other.insert(20u8);
extensions.extend(other);
assert_eq!(extensions.get(), Some(&15i32));
assert_eq!(extensions.get_mut(), Some(&mut 15i32));
assert_eq!(extensions.remove::<i32>(), Some(15i32));
assert!(extensions.get::<i32>().is_none());
assert_eq!(extensions.get::<bool>(), None);
assert_eq!(extensions.get(), Some(&MyType(10)));
assert_eq!(extensions.get(), Some(&20u8));
assert_eq!(extensions.get_mut(), Some(&mut 20u8));
}
#[test]
fn test_drain_from() {
let mut ext = Extensions::new();
ext.insert(2isize);
let mut more_ext = Extensions::new();
more_ext.insert(5isize);
more_ext.insert(5usize);
assert_eq!(ext.get::<isize>(), Some(&2isize));
assert_eq!(ext.get::<usize>(), None);
assert_eq!(more_ext.get::<isize>(), Some(&5isize));
assert_eq!(more_ext.get::<usize>(), Some(&5usize));
ext.drain_from(&mut more_ext);
assert_eq!(ext.get::<isize>(), Some(&5isize));
assert_eq!(ext.get::<usize>(), Some(&5usize));
assert_eq!(more_ext.get::<isize>(), None);
assert_eq!(more_ext.get::<usize>(), None);
}
} }


@ -173,13 +173,12 @@ impl Decoder for ClientPayloadCodec {
} }
} }
impl Encoder for ClientCodec { impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
type Item = Message<(RequestHeadType, BodySize)>;
type Error = io::Error; type Error = io::Error;
fn encode( fn encode(
&mut self, &mut self,
item: Self::Item, item: Message<(RequestHeadType, BodySize)>,
dst: &mut BytesMut, dst: &mut BytesMut,
) -> Result<(), Self::Error> { ) -> Result<(), Self::Error> {
match item { match item {
@ -224,15 +223,3 @@ impl Encoder for ClientCodec {
Ok(()) Ok(())
} }
} }
pub struct Writer<'a>(pub &'a mut BytesMut);
impl<'a> io::Write for Writer<'a> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}


@ -58,6 +58,7 @@ impl Codec {
} else { } else {
Flags::empty() Flags::empty()
}; };
Codec { Codec {
config, config,
flags, flags,
@ -69,26 +70,26 @@ impl Codec {
} }
} }
/// Check if request is upgrade.
#[inline] #[inline]
/// Check if request is upgrade
pub fn upgrade(&self) -> bool { pub fn upgrade(&self) -> bool {
self.ctype == ConnectionType::Upgrade self.ctype == ConnectionType::Upgrade
} }
/// Check if last response is keep-alive.
#[inline] #[inline]
/// Check if last response is keep-alive
pub fn keepalive(&self) -> bool { pub fn keepalive(&self) -> bool {
self.ctype == ConnectionType::KeepAlive self.ctype == ConnectionType::KeepAlive
} }
/// Check if keep-alive enabled on server level.
#[inline] #[inline]
/// Check if keep-alive enabled on server level
pub fn keepalive_enabled(&self) -> bool { pub fn keepalive_enabled(&self) -> bool {
self.flags.contains(Flags::KEEPALIVE_ENABLED) self.flags.contains(Flags::KEEPALIVE_ENABLED)
} }
/// Check last request's message type.
#[inline] #[inline]
/// Check last request's message type
pub fn message_type(&self) -> MessageType { pub fn message_type(&self) -> MessageType {
if self.flags.contains(Flags::STREAM) { if self.flags.contains(Flags::STREAM) {
MessageType::Stream MessageType::Stream
@ -110,8 +111,8 @@ impl Decoder for Codec {
type Error = ParseError; type Error = ParseError;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> { fn decode(&mut self, src: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
if self.payload.is_some() { if let Some(ref mut payload) = self.payload {
Ok(match self.payload.as_mut().unwrap().decode(src)? { Ok(match payload.decode(src)? {
Some(PayloadItem::Chunk(chunk)) => Some(Message::Chunk(Some(chunk))), Some(PayloadItem::Chunk(chunk)) => Some(Message::Chunk(Some(chunk))),
Some(PayloadItem::Eof) => { Some(PayloadItem::Eof) => {
self.payload.take(); self.payload.take();
@ -144,13 +145,12 @@ impl Decoder for Codec {
} }
} }
impl Encoder for Codec { impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
type Item = Message<(Response<()>, BodySize)>;
type Error = io::Error; type Error = io::Error;
fn encode( fn encode(
&mut self, &mut self,
item: Self::Item, item: Message<(Response<()>, BodySize)>,
dst: &mut BytesMut, dst: &mut BytesMut,
) -> Result<(), Self::Error> { ) -> Result<(), Self::Error> {
match item { match item {


@ -1,7 +1,6 @@
use std::convert::TryFrom; use std::convert::TryFrom;
use std::io; use std::io;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::mem::MaybeUninit;
use std::task::Poll; use std::task::Poll;
use actix_codec::Decoder; use actix_codec::Decoder;
@ -15,7 +14,7 @@ use crate::header::HeaderMap;
use crate::message::{ConnectionType, ResponseHead}; use crate::message::{ConnectionType, ResponseHead};
use crate::request::Request; use crate::request::Request;
const MAX_BUFFER_SIZE: usize = 131_072; pub(crate) const MAX_BUFFER_SIZE: usize = 131_072;
const MAX_HEADERS: usize = 96; const MAX_HEADERS: usize = 96;
/// Incoming message decoder /// Incoming message decoder
@ -46,7 +45,7 @@ impl<T: MessageType> Decoder for MessageDecoder<T> {
pub(crate) enum PayloadLength { pub(crate) enum PayloadLength {
Payload(PayloadType), Payload(PayloadType),
Upgrade, UpgradeWebSocket,
None, None,
} }
@ -65,7 +64,7 @@ pub(crate) trait MessageType: Sized {
raw_headers: &[HeaderIndex], raw_headers: &[HeaderIndex],
) -> Result<PayloadLength, ParseError> { ) -> Result<PayloadLength, ParseError> {
let mut ka = None; let mut ka = None;
let mut has_upgrade = false; let mut has_upgrade_websocket = false;
let mut expect = false; let mut expect = false;
let mut chunked = false; let mut chunked = false;
let mut content_length = None; let mut content_length = None;
@ -77,12 +76,14 @@ pub(crate) trait MessageType: Sized {
let name = let name =
HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap(); HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap();
// Unsafe: httparse check header value for valid utf-8 // SAFETY: httparse already checks header value is only visible ASCII bytes
// from_maybe_shared_unchecked contains debug assertions so they are omitted here
let value = unsafe { let value = unsafe {
HeaderValue::from_maybe_shared_unchecked( HeaderValue::from_maybe_shared_unchecked(
slice.slice(idx.value.0..idx.value.1), slice.slice(idx.value.0..idx.value.1),
) )
}; };
match name { match name {
header::CONTENT_LENGTH => { header::CONTENT_LENGTH => {
if let Ok(s) = value.to_str() { if let Ok(s) = value.to_str() {
@ -124,12 +125,9 @@ pub(crate) trait MessageType: Sized {
}; };
} }
header::UPGRADE => { header::UPGRADE => {
has_upgrade = true;
// check content-length, some clients (dart)
// sends "content-length: 0" with websocket upgrade
if let Ok(val) = value.to_str().map(|val| val.trim()) { if let Ok(val) = value.to_str().map(|val| val.trim()) {
if val.eq_ignore_ascii_case("websocket") { if val.eq_ignore_ascii_case("websocket") {
content_length = None; has_upgrade_websocket = true;
} }
} }
} }
@ -139,7 +137,7 @@ pub(crate) trait MessageType: Sized {
expect = true; expect = true;
} }
} }
_ => (), _ => {}
} }
headers.append(name, value); headers.append(name, value);
@ -156,13 +154,13 @@ pub(crate) trait MessageType: Sized {
Ok(PayloadLength::Payload(PayloadType::Payload( Ok(PayloadLength::Payload(PayloadType::Payload(
PayloadDecoder::chunked(), PayloadDecoder::chunked(),
))) )))
} else if has_upgrade_websocket {
Ok(PayloadLength::UpgradeWebSocket)
} else if let Some(len) = content_length { } else if let Some(len) = content_length {
// Content-Length // Content-Length
Ok(PayloadLength::Payload(PayloadType::Payload( Ok(PayloadLength::Payload(PayloadType::Payload(
PayloadDecoder::length(len), PayloadDecoder::length(len),
))) )))
} else if has_upgrade {
Ok(PayloadLength::Upgrade)
} else { } else {
Ok(PayloadLength::None) Ok(PayloadLength::None)
} }
@ -184,16 +182,11 @@ impl MessageType for Request {
&mut self.head_mut().headers &mut self.head_mut().headers
} }
#[allow(clippy::uninit_assumed_init)]
fn decode(src: &mut BytesMut) -> Result<Option<(Self, PayloadType)>, ParseError> { fn decode(src: &mut BytesMut) -> Result<Option<(Self, PayloadType)>, ParseError> {
// Unsafe: we read only this data only after httparse parses headers into. let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
// performance bump for pipeline benchmarks.
let mut headers: [HeaderIndex; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };
let (len, method, uri, ver, h_len) = { let (len, method, uri, ver, h_len) = {
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
unsafe { MaybeUninit::uninit().assume_init() };
let mut req = httparse::Request::new(&mut parsed); let mut req = httparse::Request::new(&mut parsed);
match req.parse(src)? { match req.parse(src)? {
@ -210,7 +203,15 @@ impl MessageType for Request {
(len, method, uri, version, req.headers.len()) (len, method, uri, version, req.headers.len())
} }
httparse::Status::Partial => return Ok(None), httparse::Status::Partial => {
return if src.len() >= MAX_BUFFER_SIZE {
trace!("MAX_BUFFER_SIZE unprocessed data reached, closing");
Err(ParseError::TooLarge)
} else {
// Return None to notify more read are needed for parsing request
Ok(None)
};
}
} }
}; };
@ -222,16 +223,13 @@ impl MessageType for Request {
// payload decoder // payload decoder
let decoder = match length { let decoder = match length {
PayloadLength::Payload(pl) => pl, PayloadLength::Payload(pl) => pl,
PayloadLength::Upgrade => { PayloadLength::UpgradeWebSocket => {
// upgrade(websocket) // upgrade(websocket)
PayloadType::Stream(PayloadDecoder::eof()) PayloadType::Stream(PayloadDecoder::eof())
} }
PayloadLength::None => { PayloadLength::None => {
if method == Method::CONNECT { if method == Method::CONNECT {
PayloadType::Stream(PayloadDecoder::eof()) PayloadType::Stream(PayloadDecoder::eof())
} else if src.len() >= MAX_BUFFER_SIZE {
trace!("MAX_BUFFER_SIZE unprocessed data reached, closing");
return Err(ParseError::TooLarge);
} else { } else {
PayloadType::None PayloadType::None
} }
@ -260,16 +258,11 @@ impl MessageType for ResponseHead {
&mut self.headers &mut self.headers
} }
#[allow(clippy::uninit_assumed_init)]
fn decode(src: &mut BytesMut) -> Result<Option<(Self, PayloadType)>, ParseError> { fn decode(src: &mut BytesMut) -> Result<Option<(Self, PayloadType)>, ParseError> {
// Unsafe: we read only this data only after httparse parses headers into. let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
// performance bump for pipeline benchmarks.
let mut headers: [HeaderIndex; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };
let (len, ver, status, h_len) = { let (len, ver, status, h_len) = {
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
unsafe { MaybeUninit::uninit().assume_init() };
let mut res = httparse::Response::new(&mut parsed); let mut res = httparse::Response::new(&mut parsed);
match res.parse(src)? { match res.parse(src)? {
@ -285,7 +278,14 @@ impl MessageType for ResponseHead {
(len, version, status, res.headers.len()) (len, version, status, res.headers.len())
} }
httparse::Status::Partial => return Ok(None), httparse::Status::Partial => {
return if src.len() >= MAX_BUFFER_SIZE {
error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
Err(ParseError::TooLarge)
} else {
Ok(None)
}
}
} }
}; };
@ -301,9 +301,6 @@ impl MessageType for ResponseHead {
} else if status == StatusCode::SWITCHING_PROTOCOLS { } else if status == StatusCode::SWITCHING_PROTOCOLS {
// switching protocol or connect // switching protocol or connect
PayloadType::Stream(PayloadDecoder::eof()) PayloadType::Stream(PayloadDecoder::eof())
} else if src.len() >= MAX_BUFFER_SIZE {
error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
return Err(ParseError::TooLarge);
} else { } else {
// for HTTP/1.0 read to eof and close connection // for HTTP/1.0 read to eof and close connection
if msg.version == Version::HTTP_10 { if msg.version == Version::HTTP_10 {
@ -324,6 +321,17 @@ pub(crate) struct HeaderIndex {
pub(crate) value: (usize, usize), pub(crate) value: (usize, usize),
} }
pub(crate) const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
name: (0, 0),
value: (0, 0),
};
pub(crate) const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] =
[EMPTY_HEADER_INDEX; MAX_HEADERS];
pub(crate) const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] =
[httparse::EMPTY_HEADER; MAX_HEADERS];
impl HeaderIndex { impl HeaderIndex {
pub(crate) fn record( pub(crate) fn record(
bytes: &[u8], bytes: &[u8],
@ -655,10 +663,7 @@ mod tests {
} }
fn is_unhandled(&self) -> bool { fn is_unhandled(&self) -> bool {
match self { matches!(self, PayloadType::Stream(_))
PayloadType::Stream(_) => true,
_ => false,
}
} }
} }
@ -670,10 +675,7 @@ mod tests {
} }
} }
fn eof(&self) -> bool { fn eof(&self) -> bool {
match *self { matches!(*self, PayloadItem::Eof)
PayloadItem::Eof => true,
_ => false,
}
} }
} }
@ -692,7 +694,7 @@ mod tests {
match MessageDecoder::<Request>::default().decode($e) { match MessageDecoder::<Request>::default().decode($e) {
Err(err) => match err { Err(err) => match err {
ParseError::Io(_) => unreachable!("Parse error expected"), ParseError::Io(_) => unreachable!("Parse error expected"),
_ => (), _ => {}
}, },
_ => unreachable!("Error expected"), _ => unreachable!("Error expected"),
} }
@ -828,8 +830,8 @@ mod tests {
.get_all(SET_COOKIE) .get_all(SET_COOKIE)
.map(|v| v.to_str().unwrap().to_owned()) .map(|v| v.to_str().unwrap().to_owned())
.collect(); .collect();
assert_eq!(val[1], "c1=cookie1"); assert_eq!(val[0], "c1=cookie1");
assert_eq!(val[0], "c2=cookie2"); assert_eq!(val[1], "c2=cookie2");
} }
#[test] #[test]
@ -979,7 +981,7 @@ mod tests {
unreachable!("Error"); unreachable!("Error");
} }
// type in chunked // intentional typo in "chunked"
let mut buf = BytesMut::from( let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\ "GET /test HTTP/1.1\r\n\
transfer-encoding: chnked\r\n\r\n", transfer-encoding: chnked\r\n\r\n",
@ -1040,7 +1042,7 @@ mod tests {
} }
#[test] #[test]
fn test_http_request_upgrade() { fn test_http_request_upgrade_websocket() {
let mut buf = BytesMut::from( let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\ "GET /test HTTP/1.1\r\n\
connection: upgrade\r\n\ connection: upgrade\r\n\
@ -1054,6 +1056,26 @@ mod tests {
assert!(pl.is_unhandled()); assert!(pl.is_unhandled());
} }
#[test]
fn test_http_request_upgrade_h2c() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
connection: upgrade, http2-settings\r\n\
upgrade: h2c\r\n\
http2-settings: dummy\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
// `connection: upgrade, http2-settings` doesn't work properly..
// see MessageType::set_headers().
//
// The line below should be:
// assert_eq!(req.head().connection_type(), ConnectionType::Upgrade);
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
assert!(req.upgrade());
assert!(!pl.is_unhandled());
}
#[test] #[test]
fn test_http_request_parser_utf8() { fn test_http_request_parser_utf8() {
let mut buf = BytesMut::from( let mut buf = BytesMut::from(

File diff suppressed because it is too large

@ -8,7 +8,7 @@ use bytes::{BufMut, BytesMut};
use crate::body::BodySize; use crate::body::BodySize;
use crate::config::ServiceConfig; use crate::config::ServiceConfig;
use crate::header::map; use crate::header::{map::Value, HeaderName};
use crate::helpers; use crate::helpers;
use crate::http::header::{CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING}; use crate::http::header::{CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
use crate::http::{HeaderMap, StatusCode, Version}; use crate::http::{HeaderMap, StatusCode, Version};
@ -21,7 +21,7 @@ const AVERAGE_HEADER_SIZE: usize = 30;
pub(crate) struct MessageEncoder<T: MessageType> { pub(crate) struct MessageEncoder<T: MessageType> {
pub length: BodySize, pub length: BodySize,
pub te: TransferEncoding, pub te: TransferEncoding,
_t: PhantomData<T>, _phantom: PhantomData<T>,
} }
impl<T: MessageType> Default for MessageEncoder<T> { impl<T: MessageType> Default for MessageEncoder<T> {
@ -29,7 +29,7 @@ impl<T: MessageType> Default for MessageEncoder<T> {
MessageEncoder { MessageEncoder {
length: BodySize::None, length: BodySize::None,
te: TransferEncoding::empty(), te: TransferEncoding::empty(),
_t: PhantomData, _phantom: PhantomData,
} }
} }
} }
@@ -64,14 +64,17 @@ pub(crate) trait MessageType: Sized {
         // Content length
         if let Some(status) = self.status() {
             match status {
-                StatusCode::NO_CONTENT
-                | StatusCode::CONTINUE
-                | StatusCode::PROCESSING => length = BodySize::None,
-                StatusCode::SWITCHING_PROTOCOLS => {
+                StatusCode::CONTINUE
+                | StatusCode::SWITCHING_PROTOCOLS
+                | StatusCode::PROCESSING
+                | StatusCode::NO_CONTENT => {
+                    // skip content-length and transfer-encoding headers
+                    // See https://tools.ietf.org/html/rfc7230#section-3.3.1
+                    // and https://tools.ietf.org/html/rfc7230#section-3.3.2
                     skip_len = true;
-                    length = BodySize::Stream;
+                    length = BodySize::None
                 }
-                _ => (),
+                _ => {}
             }
         }
         match length {
@ -115,103 +118,87 @@ pub(crate) trait MessageType: Sized {
dst.put_slice(b"connection: close\r\n") dst.put_slice(b"connection: close\r\n")
} }
} }
_ => (), _ => {}
} }
// merging headers from head and extra headers. HeaderMap::new() does not allocate.
let empty_headers = HeaderMap::new();
let extra_headers = self.extra_headers().unwrap_or(&empty_headers);
let headers = self
.headers()
.inner
.iter()
.filter(|(name, _)| !extra_headers.contains_key(*name))
.chain(extra_headers.inner.iter());
// write headers // write headers
let mut pos = 0;
let mut has_date = false; let mut has_date = false;
let mut buf = dst.chunk_mut().as_mut_ptr();
let mut remaining = dst.capacity() - dst.len(); let mut remaining = dst.capacity() - dst.len();
let mut buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
for (key, value) in headers { // tracks bytes written since last buffer resize
// since buf is a raw pointer to a bytes container storage but is written to without the
// container's knowledge, this is used to sync the containers cursor after data is written
let mut pos = 0;
self.write_headers(|key, value| {
match *key { match *key {
CONNECTION => continue, CONNECTION => return,
TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => continue, TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => return,
DATE => { DATE => has_date = true,
has_date = true; _ => {}
}
_ => (),
} }
let k = key.as_str().as_bytes(); let k = key.as_str().as_bytes();
match value { let k_len = k.len();
map::Value::One(ref val) => {
let v = val.as_ref(); // TODO: drain?
let v_len = v.len(); for val in value.iter() {
let k_len = k.len(); let v = val.as_ref();
let len = k_len + v_len + 4; let v_len = v.len();
if len > remaining {
unsafe { // key length + value length + colon + space + \r\n
dst.advance_mut(pos); let len = k_len + v_len + 4;
}
pos = 0; if len > remaining {
dst.reserve(len * 2); // SAFETY: all the bytes written up to position "pos" are initialized
remaining = dst.capacity() - dst.len(); // the written byte count and pointer advancement are kept in sync
buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
}
// use upper Camel-Case
unsafe { unsafe {
if camel_case { dst.advance_mut(pos);
write_camel_case(k, from_raw_parts_mut(buf, k_len))
} else {
write_data(k, buf, k_len)
}
buf = buf.add(k_len);
write_data(b": ", buf, 2);
buf = buf.add(2);
write_data(v, buf, v_len);
buf = buf.add(v_len);
write_data(b"\r\n", buf, 2);
buf = buf.add(2);
pos += len;
remaining -= len;
} }
pos = 0;
dst.reserve(len * 2);
remaining = dst.capacity() - dst.len();
// re-assign buf raw pointer since it's possible that the buffer was
// reallocated and/or resized
buf = dst.chunk_mut().as_mut_ptr();
} }
map::Value::Multi(ref vec) => {
for val in vec { // SAFETY: on each write, it is enough to ensure that the advancement of
let v = val.as_ref(); // the cursor matches the number of bytes written
let v_len = v.len(); unsafe {
let k_len = k.len(); if camel_case {
let len = k_len + v_len + 4; // use Camel-Case headers
if len > remaining { write_camel_case(k, from_raw_parts_mut(buf, k_len));
unsafe { } else {
dst.advance_mut(pos); write_data(k, buf, k_len);
}
pos = 0;
dst.reserve(len * 2);
remaining = dst.capacity() - dst.len();
buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
}
// use upper Camel-Case
unsafe {
if camel_case {
write_camel_case(k, from_raw_parts_mut(buf, k_len));
} else {
write_data(k, buf, k_len);
}
buf = buf.add(k_len);
write_data(b": ", buf, 2);
buf = buf.add(2);
write_data(v, buf, v_len);
buf = buf.add(v_len);
write_data(b"\r\n", buf, 2);
buf = buf.add(2);
};
pos += len;
remaining -= len;
} }
}
buf = buf.add(k_len);
write_data(b": ", buf, 2);
buf = buf.add(2);
write_data(v, buf, v_len);
buf = buf.add(v_len);
write_data(b"\r\n", buf, 2);
buf = buf.add(2);
};
pos += len;
remaining -= len;
} }
} });
// final cursor synchronization with the bytes container
//
// SAFETY: all the bytes written up to position "pos" are initialized
// the written byte count and pointer advancement are kept in sync
unsafe { unsafe {
dst.advance_mut(pos); dst.advance_mut(pos);
} }
@ -226,6 +213,24 @@ pub(crate) trait MessageType: Sized {
Ok(()) Ok(())
} }
fn write_headers<F>(&mut self, mut f: F)
where
F: FnMut(&HeaderName, &Value),
{
match self.extra_headers() {
Some(headers) => {
// merging headers from head and extra headers.
self.headers()
.inner
.iter()
.filter(|(name, _)| !headers.contains_key(*name))
.chain(headers.inner.iter())
.for_each(|(k, v)| f(k, v))
}
None => self.headers().inner.iter().for_each(|(k, v)| f(k, v)),
}
}
} }
impl MessageType for Response<()> { impl MessageType for Response<()> {
@ -282,7 +287,7 @@ impl MessageType for RequestHeadType {
let head = self.as_ref(); let head = self.as_ref();
dst.reserve(256 + head.headers.len() * AVERAGE_HEADER_SIZE); dst.reserve(256 + head.headers.len() * AVERAGE_HEADER_SIZE);
write!( write!(
Writer(dst), helpers::Writer(dst),
"{} {} {}", "{} {} {}",
head.method, head.method,
head.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"), head.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"),
@ -415,7 +420,7 @@ impl TransferEncoding {
*eof = true; *eof = true;
buf.extend_from_slice(b"0\r\n\r\n"); buf.extend_from_slice(b"0\r\n\r\n");
} else { } else {
writeln!(Writer(buf), "{:X}\r", msg.len()) writeln!(helpers::Writer(buf), "{:X}\r", msg.len())
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))?; .map_err(|e| io::Error::new(io::ErrorKind::Other, e))?;
buf.reserve(msg.len() + 2); buf.reserve(msg.len() + 2);
@@ -465,47 +470,37 @@ impl TransferEncoding {
     }
 }

-struct Writer<'a>(pub &'a mut BytesMut);
-
-impl<'a> io::Write for Writer<'a> {
-    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
-        self.0.extend_from_slice(buf);
-        Ok(buf.len())
-    }
-    fn flush(&mut self) -> io::Result<()> {
-        Ok(())
-    }
-}
-
+/// # Safety
+/// Callers must ensure that the given length matches given value length.
 unsafe fn write_data(value: &[u8], buf: *mut u8, len: usize) {
+    debug_assert_eq!(value.len(), len);
     copy_nonoverlapping(value.as_ptr(), buf, len);
 }

 fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
-    let mut index = 0;
-    let key = value;
-    let mut key_iter = key.iter();
-
-    if let Some(c) = key_iter.next() {
-        if *c >= b'a' && *c <= b'z' {
-            buffer[index] = *c ^ b' ';
-            index += 1;
-        }
-    } else {
-        return;
-    }
-
-    while let Some(c) = key_iter.next() {
-        buffer[index] = *c;
-        index += 1;
-        if *c == b'-' {
-            if let Some(c) = key_iter.next() {
-                if *c >= b'a' && *c <= b'z' {
-                    buffer[index] = *c ^ b' ';
-                    index += 1;
-                }
-            }
-        }
-    }
+    // first copy entire (potentially wrong) slice to output
+    buffer[..value.len()].copy_from_slice(value);
+
+    let mut iter = value.iter();
+
+    // first character should be uppercase
+    if let Some(c @ b'a'..=b'z') = iter.next() {
+        buffer[0] = c & 0b1101_1111;
+    }
+
+    // track 1 ahead of the current position since that's the location being assigned to
+    let mut index = 2;
+
+    // remaining characters after hyphens should also be uppercase
+    while let Some(&c) = iter.next() {
+        if c == b'-' {
+            // advance iter by one and uppercase if needed
+            if let Some(c @ b'a'..=b'z') = iter.next() {
+                buffer[index] = c & 0b1101_1111;
+            }
+        }
+
+        index += 1;
+    }
 }
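Note: `c & 0b1101_1111` clears the ASCII case bit, which is equivalent to the old `^ b' '` trick for lowercase letters. A safe, allocation-based sketch of the same Camel-Case transform for comparison (illustrative only; the in-tree version writes into a preallocated buffer for speed):

/// Convert a lowercase header name like "content-length" to "Content-Length".
fn camel_case(name: &str) -> String {
    let mut out = String::with_capacity(name.len());
    let mut upper_next = true;

    for ch in name.chars() {
        if upper_next {
            out.extend(ch.to_uppercase());
            upper_next = false;
        } else {
            out.push(ch);
        }
        if ch == '-' {
            upper_next = true;
        }
    }

    out
}

fn main() {
    assert_eq!(camel_case("content-length"), "Content-Length");
    assert_eq!(camel_case("x-request-id"), "X-Request-Id");
}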
@ -554,6 +549,8 @@ mod tests {
); );
let data = let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap(); String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
eprintln!("{}", &data);
assert!(data.contains("Content-Length: 0\r\n")); assert!(data.contains("Content-Length: 0\r\n"));
assert!(data.contains("Connection: close\r\n")); assert!(data.contains("Connection: close\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n")); assert!(data.contains("Content-Type: plain/text\r\n"));
@ -629,4 +626,28 @@ mod tests {
assert!(data.contains("authorization: another authorization\r\n")); assert!(data.contains("authorization: another authorization\r\n"));
assert!(data.contains("date: date\r\n")); assert!(data.contains("date: date\r\n"));
} }
#[test]
fn test_no_content_length() {
let mut bytes = BytesMut::with_capacity(2048);
let mut res: Response<()> =
Response::new(StatusCode::SWITCHING_PROTOCOLS).into_body::<()>();
res.headers_mut()
.insert(DATE, HeaderValue::from_static(&""));
res.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static(&"0"));
let _ = res.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Stream,
ConnectionType::Upgrade,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(!data.contains("content-length: 0\r\n"));
assert!(!data.contains("transfer-encoding: chunked\r\n"));
}
} }

View File

@ -1,38 +1,36 @@
use std::task::{Context, Poll}; use std::task::Poll;
use actix_service::{Service, ServiceFactory}; use actix_service::{Service, ServiceFactory};
use futures_util::future::{ok, Ready}; use futures_util::future::{ready, Ready};
use crate::error::Error; use crate::error::Error;
use crate::request::Request; use crate::request::Request;
pub struct ExpectHandler; pub struct ExpectHandler;
impl ServiceFactory for ExpectHandler { impl ServiceFactory<Request> for ExpectHandler {
type Config = ();
type Request = Request;
type Response = Request; type Response = Request;
type Error = Error; type Error = Error;
type Config = ();
type Service = ExpectHandler; type Service = ExpectHandler;
type InitError = Error; type InitError = Error;
type Future = Ready<Result<Self::Service, Self::InitError>>; type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future { fn new_service(&self, _: Self::Config) -> Self::Future {
ok(ExpectHandler) ready(Ok(ExpectHandler))
} }
} }
impl Service for ExpectHandler { impl Service<Request> for ExpectHandler {
type Request = Request;
type Response = Request; type Response = Request;
type Error = Error; type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>; type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { actix_service::always_ready!();
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Request) -> Self::Future { fn call(&self, req: Request) -> Self::Future {
ok(req) ready(Ok(req))
// TODO: add some way to trigger error
// Err(error::ErrorExpectationFailed("test"))
} }
} }
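
The ExpectHandler changes track the actix-service v2 trait shape used throughout this diff: the request type moves from an associated `type Request` to a generic parameter, `poll_ready`/`call` take `&self`, and `always_ready!()` stands in for a trivially-ready `poll_ready`. A rough sketch of a service written against that shape (assuming the actix-service 2.0 beta and futures-util; the `Echo` type is illustrative):

use std::task::{Context, Poll};

use actix_service::Service;
use futures_util::future::{ready, Ready};

struct Echo;

impl Service<String> for Echo {
    type Response = String;
    type Error = ();
    type Future = Ready<Result<Self::Response, Self::Error>>;

    // hand-written equivalent of what `actix_service::always_ready!()` generates
    fn poll_ready(&self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&self, req: String) -> Self::Future {
        ready(Ok(req))
    }
}
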

View File

@ -17,7 +17,7 @@ pub use self::codec::Codec;
pub use self::dispatcher::Dispatcher; pub use self::dispatcher::Dispatcher;
pub use self::expect::ExpectHandler; pub use self::expect::ExpectHandler;
pub use self::payload::Payload; pub use self::payload::Payload;
pub use self::service::{H1Service, H1ServiceHandler, OneRequest}; pub use self::service::{H1Service, H1ServiceHandler};
pub use self::upgrade::UpgradeHandler; pub use self::upgrade::UpgradeHandler;
pub use self::utils::SendResponse; pub use self::utils::SendResponse;

View File

@ -182,9 +182,7 @@ impl Inner {
self.len += data.len(); self.len += data.len();
self.items.push_back(data); self.items.push_back(data);
self.need_read = self.len < MAX_BUFFER_SIZE; self.need_read = self.len < MAX_BUFFER_SIZE;
if let Some(task) = self.task.take() { self.task.wake();
task.wake()
}
} }
#[cfg(test)] #[cfg(test)]

View File

@ -9,40 +9,40 @@ use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_rt::net::TcpStream; use actix_rt::net::TcpStream;
use actix_service::{pipeline_factory, IntoServiceFactory, Service, ServiceFactory}; use actix_service::{pipeline_factory, IntoServiceFactory, Service, ServiceFactory};
use futures_core::ready; use futures_core::ready;
use futures_util::future::{ok, Ready}; use futures_util::future::ready;
use crate::body::MessageBody; use crate::body::MessageBody;
use crate::cloneable::CloneableService;
use crate::config::ServiceConfig; use crate::config::ServiceConfig;
use crate::error::{DispatchError, Error, ParseError}; use crate::error::{DispatchError, Error};
use crate::helpers::DataFactory;
use crate::request::Request; use crate::request::Request;
use crate::response::Response; use crate::response::Response;
use crate::service::HttpFlow;
use crate::{ConnectCallback, OnConnectData};
use super::codec::Codec; use super::codec::Codec;
use super::dispatcher::Dispatcher; use super::dispatcher::Dispatcher;
use super::{ExpectHandler, Message, UpgradeHandler}; use super::{ExpectHandler, UpgradeHandler};
/// `ServiceFactory` implementation for HTTP1 transport /// `ServiceFactory` implementation for HTTP1 transport
pub struct H1Service<T, S, B, X = ExpectHandler, U = UpgradeHandler<T>> { pub struct H1Service<T, S, B, X = ExpectHandler, U = UpgradeHandler> {
srv: S, srv: S,
cfg: ServiceConfig, cfg: ServiceConfig,
expect: X, expect: X,
upgrade: Option<U>, upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>, on_connect_ext: Option<Rc<ConnectCallback<T>>>,
_t: PhantomData<(T, B)>, _phantom: PhantomData<B>,
} }
impl<T, S, B> H1Service<T, S, B> impl<T, S, B> H1Service<T, S, B>
where where
S: ServiceFactory<Config = (), Request = Request>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error>, S::Error: Into<Error>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
B: MessageBody, B: MessageBody,
{ {
/// Create new `HttpService` instance with config. /// Create new `HttpService` instance with config.
pub(crate) fn with_config<F: IntoServiceFactory<S>>( pub(crate) fn with_config<F: IntoServiceFactory<S, Request>>(
cfg: ServiceConfig, cfg: ServiceConfig,
service: F, service: F,
) -> Self { ) -> Self {
@ -51,27 +51,23 @@ where
srv: service.into_factory(), srv: service.into_factory(),
expect: ExpectHandler, expect: ExpectHandler,
upgrade: None, upgrade: None,
on_connect: None, on_connect_ext: None,
_t: PhantomData, _phantom: PhantomData,
} }
} }
} }
impl<S, B, X, U> H1Service<TcpStream, S, B, X, U> impl<S, B, X, U> H1Service<TcpStream, S, B, X, U>
where where
S: ServiceFactory<Config = (), Request = Request>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error>, S::Error: Into<Error>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
B: MessageBody, B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>, X: ServiceFactory<Request, Config = (), Response = Request>,
X::Error: Into<Error>, X::Error: Into<Error>,
X::InitError: fmt::Debug, X::InitError: fmt::Debug,
U: ServiceFactory< U: ServiceFactory<(Request, Framed<TcpStream, Codec>), Config = (), Response = ()>,
Config = (),
Request = (Request, Framed<TcpStream, Codec>),
Response = (),
>,
U::Error: fmt::Display + Into<Error>, U::Error: fmt::Display + Into<Error>,
U::InitError: fmt::Debug, U::InitError: fmt::Debug,
{ {
@ -79,15 +75,15 @@ where
pub fn tcp( pub fn tcp(
self, self,
) -> impl ServiceFactory< ) -> impl ServiceFactory<
TcpStream,
Config = (), Config = (),
Request = TcpStream,
Response = (), Response = (),
Error = DispatchError, Error = DispatchError,
InitError = (), InitError = (),
> { > {
pipeline_factory(|io: TcpStream| { pipeline_factory(|io: TcpStream| {
let peer_addr = io.peer_addr().ok(); let peer_addr = io.peer_addr().ok();
ok((io, peer_addr)) ready(Ok((io, peer_addr)))
}) })
.and_then(self) .and_then(self)
} }
@ -97,22 +93,23 @@ where
mod openssl { mod openssl {
use super::*; use super::*;
use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream}; use actix_service::ServiceFactoryExt;
use actix_tls::{openssl::HandshakeError, SslError}; use actix_tls::accept::openssl::{Acceptor, SslAcceptor, SslError, SslStream};
use actix_tls::accept::TlsError;
impl<S, B, X, U> H1Service<SslStream<TcpStream>, S, B, X, U> impl<S, B, X, U> H1Service<SslStream<TcpStream>, S, B, X, U>
where where
S: ServiceFactory<Config = (), Request = Request>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error>, S::Error: Into<Error>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
B: MessageBody, B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>, X: ServiceFactory<Request, Config = (), Response = Request>,
X::Error: Into<Error>, X::Error: Into<Error>,
X::InitError: fmt::Debug, X::InitError: fmt::Debug,
U: ServiceFactory< U: ServiceFactory<
(Request, Framed<SslStream<TcpStream>, Codec>),
Config = (), Config = (),
Request = (Request, Framed<SslStream<TcpStream>, Codec>),
Response = (), Response = (),
>, >,
U::Error: fmt::Display + Into<Error>, U::Error: fmt::Display + Into<Error>,
@ -123,22 +120,22 @@ mod openssl {
self, self,
acceptor: SslAcceptor, acceptor: SslAcceptor,
) -> impl ServiceFactory< ) -> impl ServiceFactory<
TcpStream,
Config = (), Config = (),
Request = TcpStream,
Response = (), Response = (),
Error = SslError<HandshakeError<TcpStream>, DispatchError>, Error = TlsError<SslError, DispatchError>,
InitError = (), InitError = (),
> { > {
pipeline_factory( pipeline_factory(
Acceptor::new(acceptor) Acceptor::new(acceptor)
.map_err(SslError::Ssl) .map_err(TlsError::Tls)
.map_init_err(|_| panic!()), .map_init_err(|_| panic!()),
) )
.and_then(|io: SslStream<TcpStream>| { .and_then(|io: SslStream<TcpStream>| {
let peer_addr = io.get_ref().peer_addr().ok(); let peer_addr = io.get_ref().peer_addr().ok();
ok((io, peer_addr)) ready(Ok((io, peer_addr)))
}) })
.and_then(self.map_err(SslError::Service)) .and_then(self.map_err(TlsError::Service))
} }
} }
} }
@ -146,23 +143,24 @@ mod openssl {
#[cfg(feature = "rustls")] #[cfg(feature = "rustls")]
mod rustls { mod rustls {
use super::*; use super::*;
use actix_tls::rustls::{Acceptor, ServerConfig, TlsStream}; use actix_service::ServiceFactoryExt;
use actix_tls::SslError; use actix_tls::accept::rustls::{Acceptor, ServerConfig, TlsStream};
use actix_tls::accept::TlsError;
use std::{fmt, io}; use std::{fmt, io};
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U> impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where where
S: ServiceFactory<Config = (), Request = Request>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error>, S::Error: Into<Error>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
B: MessageBody, B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>, X: ServiceFactory<Request, Config = (), Response = Request>,
X::Error: Into<Error>, X::Error: Into<Error>,
X::InitError: fmt::Debug, X::InitError: fmt::Debug,
U: ServiceFactory< U: ServiceFactory<
(Request, Framed<TlsStream<TcpStream>, Codec>),
Config = (), Config = (),
Request = (Request, Framed<TlsStream<TcpStream>, Codec>),
Response = (), Response = (),
>, >,
U::Error: fmt::Display + Into<Error>, U::Error: fmt::Display + Into<Error>,
@ -173,29 +171,29 @@ mod rustls {
self, self,
config: ServerConfig, config: ServerConfig,
) -> impl ServiceFactory< ) -> impl ServiceFactory<
TcpStream,
Config = (), Config = (),
Request = TcpStream,
Response = (), Response = (),
Error = SslError<io::Error, DispatchError>, Error = TlsError<io::Error, DispatchError>,
InitError = (), InitError = (),
> { > {
pipeline_factory( pipeline_factory(
Acceptor::new(config) Acceptor::new(config)
.map_err(SslError::Ssl) .map_err(TlsError::Tls)
.map_init_err(|_| panic!()), .map_init_err(|_| panic!()),
) )
.and_then(|io: TlsStream<TcpStream>| { .and_then(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok(); let peer_addr = io.get_ref().0.peer_addr().ok();
ok((io, peer_addr)) ready(Ok((io, peer_addr)))
}) })
.and_then(self.map_err(SslError::Service)) .and_then(self.map_err(TlsError::Service))
} }
} }
} }
impl<T, S, B, X, U> H1Service<T, S, B, X, U> impl<T, S, B, X, U> H1Service<T, S, B, X, U>
where where
S: ServiceFactory<Config = (), Request = Request>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error>, S::Error: Into<Error>,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
@ -203,7 +201,7 @@ where
{ {
pub fn expect<X1>(self, expect: X1) -> H1Service<T, S, B, X1, U> pub fn expect<X1>(self, expect: X1) -> H1Service<T, S, B, X1, U>
where where
X1: ServiceFactory<Request = Request, Response = Request>, X1: ServiceFactory<Request, Response = Request>,
X1::Error: Into<Error>, X1::Error: Into<Error>,
X1::InitError: fmt::Debug, X1::InitError: fmt::Debug,
{ {
@ -212,14 +210,14 @@ where
cfg: self.cfg, cfg: self.cfg,
srv: self.srv, srv: self.srv,
upgrade: self.upgrade, upgrade: self.upgrade,
on_connect: self.on_connect, on_connect_ext: self.on_connect_ext,
_t: PhantomData, _phantom: PhantomData,
} }
} }
pub fn upgrade<U1>(self, upgrade: Option<U1>) -> H1Service<T, S, B, X, U1> pub fn upgrade<U1>(self, upgrade: Option<U1>) -> H1Service<T, S, B, X, U1>
where where
U1: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>, U1: ServiceFactory<(Request, Framed<T, Codec>), Response = ()>,
U1::Error: fmt::Display, U1::Error: fmt::Display,
U1::InitError: fmt::Debug, U1::InitError: fmt::Debug,
{ {
@ -228,42 +226,39 @@ where
cfg: self.cfg, cfg: self.cfg,
srv: self.srv, srv: self.srv,
expect: self.expect, expect: self.expect,
on_connect: self.on_connect, on_connect_ext: self.on_connect_ext,
_t: PhantomData, _phantom: PhantomData,
} }
} }
/// Set on connect callback. /// Set on connect callback.
pub(crate) fn on_connect( pub(crate) fn on_connect_ext(mut self, f: Option<Rc<ConnectCallback<T>>>) -> Self {
mut self, self.on_connect_ext = f;
f: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
) -> Self {
self.on_connect = f;
self self
} }
} }
impl<T, S, B, X, U> ServiceFactory for H1Service<T, S, B, X, U> impl<T, S, B, X, U> ServiceFactory<(T, Option<net::SocketAddr>)>
for H1Service<T, S, B, X, U>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Config = (), Request = Request>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error>, S::Error: Into<Error>,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
B: MessageBody, B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>, X: ServiceFactory<Request, Config = (), Response = Request>,
X::Error: Into<Error>, X::Error: Into<Error>,
X::InitError: fmt::Debug, X::InitError: fmt::Debug,
U: ServiceFactory<Config = (), Request = (Request, Framed<T, Codec>), Response = ()>, U: ServiceFactory<(Request, Framed<T, Codec>), Config = (), Response = ()>,
U::Error: fmt::Display + Into<Error>, U::Error: fmt::Display + Into<Error>,
U::InitError: fmt::Debug, U::InitError: fmt::Debug,
{ {
type Config = ();
type Request = (T, Option<net::SocketAddr>);
type Response = (); type Response = ();
type Error = DispatchError; type Error = DispatchError;
type InitError = (); type Config = ();
type Service = H1ServiceHandler<T, S::Service, B, X::Service, U::Service>; type Service = H1ServiceHandler<T, S::Service, B, X::Service, U::Service>;
type InitError = ();
type Future = H1ServiceResponse<T, S, B, X, U>; type Future = H1ServiceResponse<T, S, B, X, U>;
fn new_service(&self, _: ()) -> Self::Future { fn new_service(&self, _: ()) -> Self::Future {
@ -273,9 +268,9 @@ where
fut_upg: self.upgrade.as_ref().map(|f| f.new_service(())), fut_upg: self.upgrade.as_ref().map(|f| f.new_service(())),
expect: None, expect: None,
upgrade: None, upgrade: None,
on_connect: self.on_connect.clone(), on_connect_ext: self.on_connect_ext.clone(),
cfg: Some(self.cfg.clone()), cfg: Some(self.cfg.clone()),
_t: PhantomData, _phantom: PhantomData,
} }
} }
} }
@ -284,13 +279,13 @@ where
#[pin_project::pin_project] #[pin_project::pin_project]
pub struct H1ServiceResponse<T, S, B, X, U> pub struct H1ServiceResponse<T, S, B, X, U>
where where
S: ServiceFactory<Request = Request>, S: ServiceFactory<Request>,
S::Error: Into<Error>, S::Error: Into<Error>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
X: ServiceFactory<Request = Request, Response = Request>, X: ServiceFactory<Request, Response = Request>,
X::Error: Into<Error>, X::Error: Into<Error>,
X::InitError: fmt::Debug, X::InitError: fmt::Debug,
U: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>, U: ServiceFactory<(Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display, U::Error: fmt::Display,
U::InitError: fmt::Debug, U::InitError: fmt::Debug,
{ {
@ -302,23 +297,23 @@ where
fut_upg: Option<U::Future>, fut_upg: Option<U::Future>,
expect: Option<X::Service>, expect: Option<X::Service>,
upgrade: Option<U::Service>, upgrade: Option<U::Service>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>, on_connect_ext: Option<Rc<ConnectCallback<T>>>,
cfg: Option<ServiceConfig>, cfg: Option<ServiceConfig>,
_t: PhantomData<(T, B)>, _phantom: PhantomData<B>,
} }
impl<T, S, B, X, U> Future for H1ServiceResponse<T, S, B, X, U> impl<T, S, B, X, U> Future for H1ServiceResponse<T, S, B, X, U>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Request = Request>, S: ServiceFactory<Request>,
S::Error: Into<Error>, S::Error: Into<Error>,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
S::InitError: fmt::Debug, S::InitError: fmt::Debug,
B: MessageBody, B: MessageBody,
X: ServiceFactory<Request = Request, Response = Request>, X: ServiceFactory<Request, Response = Request>,
X::Error: Into<Error>, X::Error: Into<Error>,
X::InitError: fmt::Debug, X::InitError: fmt::Debug,
U: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>, U: ServiceFactory<(Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display, U::Error: fmt::Display,
U::InitError: fmt::Debug, U::InitError: fmt::Debug,
{ {
@ -342,7 +337,7 @@ where
.map_err(|e| log::error!("Init http service error: {:?}", e)))?; .map_err(|e| log::error!("Init http service error: {:?}", e)))?;
this = self.as_mut().project(); this = self.as_mut().project();
*this.upgrade = Some(upgrade); *this.upgrade = Some(upgrade);
this.fut_ex.set(None); this.fut_upg.set(None);
} }
let result = ready!(this let result = ready!(this
@ -352,75 +347,78 @@ where
Poll::Ready(result.map(|service| { Poll::Ready(result.map(|service| {
let this = self.as_mut().project(); let this = self.as_mut().project();
H1ServiceHandler::new( H1ServiceHandler::new(
this.cfg.take().unwrap(), this.cfg.take().unwrap(),
service, service,
this.expect.take().unwrap(), this.expect.take().unwrap(),
this.upgrade.take(), this.upgrade.take(),
this.on_connect.clone(), this.on_connect_ext.clone(),
) )
})) }))
} }
} }
/// `Service` implementation for HTTP1 transport /// `Service` implementation for HTTP/1 transport
pub struct H1ServiceHandler<T, S: Service, B, X: Service, U: Service> { pub struct H1ServiceHandler<T, S, B, X, U>
srv: CloneableService<S>, where
expect: CloneableService<X>, S: Service<Request>,
upgrade: Option<CloneableService<U>>, X: Service<Request>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>, U: Service<(Request, Framed<T, Codec>)>,
{
flow: Rc<HttpFlow<S, X, U>>,
on_connect_ext: Option<Rc<ConnectCallback<T>>>,
cfg: ServiceConfig, cfg: ServiceConfig,
_t: PhantomData<(T, B)>, _phantom: PhantomData<B>,
} }
impl<T, S, B, X, U> H1ServiceHandler<T, S, B, X, U> impl<T, S, B, X, U> H1ServiceHandler<T, S, B, X, U>
where where
S: Service<Request = Request>, S: Service<Request>,
S::Error: Into<Error>, S::Error: Into<Error>,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
B: MessageBody, B: MessageBody,
X: Service<Request = Request, Response = Request>, X: Service<Request, Response = Request>,
X::Error: Into<Error>, X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>, U: Service<(Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display, U::Error: fmt::Display,
{ {
fn new( fn new(
cfg: ServiceConfig, cfg: ServiceConfig,
srv: S, service: S,
expect: X, expect: X,
upgrade: Option<U>, upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>, on_connect_ext: Option<Rc<ConnectCallback<T>>>,
) -> H1ServiceHandler<T, S, B, X, U> { ) -> H1ServiceHandler<T, S, B, X, U> {
H1ServiceHandler { H1ServiceHandler {
srv: CloneableService::new(srv), flow: HttpFlow::new(service, expect, upgrade),
expect: CloneableService::new(expect),
upgrade: upgrade.map(CloneableService::new),
cfg, cfg,
on_connect, on_connect_ext,
_t: PhantomData, _phantom: PhantomData,
} }
} }
} }
impl<T, S, B, X, U> Service for H1ServiceHandler<T, S, B, X, U> impl<T, S, B, X, U> Service<(T, Option<net::SocketAddr>)>
for H1ServiceHandler<T, S, B, X, U>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>, S: Service<Request>,
S::Error: Into<Error>, S::Error: Into<Error>,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
B: MessageBody, B: MessageBody,
X: Service<Request = Request, Response = Request>, X: Service<Request, Response = Request>,
X::Error: Into<Error>, X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>, U: Service<(Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display + Into<Error>, U::Error: fmt::Display + Into<Error>,
{ {
type Request = (T, Option<net::SocketAddr>);
type Response = (); type Response = ();
type Error = DispatchError; type Error = DispatchError;
type Future = Dispatcher<T, S, B, X, U>; type Future = Dispatcher<T, S, B, X, U>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let ready = self let ready = self
.flow
.expect .expect
.poll_ready(cx) .poll_ready(cx)
.map_err(|e| { .map_err(|e| {
@ -431,7 +429,8 @@ where
.is_ready(); .is_ready();
let ready = self let ready = self
.srv .flow
.service
.poll_ready(cx) .poll_ready(cx)
.map_err(|e| { .map_err(|e| {
let e = e.into(); let e = e.into();
@ -441,7 +440,7 @@ where
.is_ready() .is_ready()
&& ready; && ready;
let ready = if let Some(ref mut upg) = self.upgrade { let ready = if let Some(ref upg) = self.flow.upgrade {
upg.poll_ready(cx) upg.poll_ready(cx)
.map_err(|e| { .map_err(|e| {
let e = e.into(); let e = e.into();
@ -461,117 +460,16 @@ where
} }
} }
fn call(&mut self, (io, addr): Self::Request) -> Self::Future { fn call(&self, (io, addr): (T, Option<net::SocketAddr>)) -> Self::Future {
let on_connect = if let Some(ref on_connect) = self.on_connect { let on_connect_data =
Some(on_connect(&io)) OnConnectData::from_io(&io, self.on_connect_ext.as_deref());
} else {
None
};
Dispatcher::new( Dispatcher::new(
io, io,
self.cfg.clone(), self.cfg.clone(),
self.srv.clone(), self.flow.clone(),
self.expect.clone(), on_connect_data,
self.upgrade.clone(),
on_connect,
addr, addr,
) )
} }
} }
/// `ServiceFactory` implementation for `OneRequestService` service
#[derive(Default)]
pub struct OneRequest<T> {
config: ServiceConfig,
_t: PhantomData<T>,
}
impl<T> OneRequest<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
/// Create new `H1SimpleService` instance.
pub fn new() -> Self {
OneRequest {
config: ServiceConfig::default(),
_t: PhantomData,
}
}
}
impl<T> ServiceFactory for OneRequest<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Config = ();
type Request = T;
type Response = (Request, Framed<T, Codec>);
type Error = ParseError;
type InitError = ();
type Service = OneRequestService<T>;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(OneRequestService {
_t: PhantomData,
config: self.config.clone(),
})
}
}
/// `Service` implementation for HTTP1 transport. Reads one request and returns
/// request and framed object.
pub struct OneRequestService<T> {
_t: PhantomData<T>,
config: ServiceConfig,
}
impl<T> Service for OneRequestService<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Request = T;
type Response = (Request, Framed<T, Codec>);
type Error = ParseError;
type Future = OneRequestServiceResponse<T>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Self::Request) -> Self::Future {
OneRequestServiceResponse {
framed: Some(Framed::new(req, Codec::new(self.config.clone()))),
}
}
}
#[doc(hidden)]
pub struct OneRequestServiceResponse<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
framed: Option<Framed<T, Codec>>,
}
impl<T> Future for OneRequestServiceResponse<T>
where
T: AsyncRead + AsyncWrite + Unpin,
{
type Output = Result<(Request, Framed<T, Codec>), ParseError>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.framed.as_mut().unwrap().next_item(cx) {
Poll::Ready(Some(Ok(req))) => match req {
Message::Item(req) => {
Poll::Ready(Ok((req, self.framed.take().unwrap())))
}
Message::Chunk(_) => unreachable!("Something is wrong"),
},
Poll::Ready(Some(Err(err))) => Poll::Ready(Err(err)),
Poll::Ready(None) => Poll::Ready(Err(ParseError::Incomplete)),
Poll::Pending => Poll::Pending,
}
}
}
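
Above, H1ServiceHandler swaps three separately cloned `CloneableService` wrappers for a single shared `Rc<HttpFlow<S, X, U>>`, so each accepted connection costs one reference-count bump instead of three service clones. A cut-down sketch of that grouping (the `Flow` type below is illustrative, not the crate's `HttpFlow`):

use std::rc::Rc;

// bundle the per-connection services behind one shared allocation
struct Flow<S, X, U> {
    service: S,
    expect: X,
    upgrade: Option<U>,
}

impl<S, X, U> Flow<S, X, U> {
    fn new(service: S, expect: X, upgrade: Option<U>) -> Rc<Self> {
        Rc::new(Self { service, expect, upgrade })
    }
}

fn main() {
    let flow = Flow::new("app service", "expect service", Some("upgrade service"));

    // the dispatcher for each connection clones the Rc, not the services inside it
    let per_conn = Rc::clone(&flow);
    assert_eq!(per_conn.service, "app service");
}
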

View File

@ -1,22 +1,20 @@
use std::marker::PhantomData; use std::task::Poll;
use std::task::{Context, Poll};
use actix_codec::Framed; use actix_codec::Framed;
use actix_service::{Service, ServiceFactory}; use actix_service::{Service, ServiceFactory};
use futures_util::future::Ready; use futures_util::future::{ready, Ready};
use crate::error::Error; use crate::error::Error;
use crate::h1::Codec; use crate::h1::Codec;
use crate::request::Request; use crate::request::Request;
pub struct UpgradeHandler<T>(PhantomData<T>); pub struct UpgradeHandler;
impl<T> ServiceFactory for UpgradeHandler<T> { impl<T> ServiceFactory<(Request, Framed<T, Codec>)> for UpgradeHandler {
type Config = ();
type Request = (Request, Framed<T, Codec>);
type Response = (); type Response = ();
type Error = Error; type Error = Error;
type Service = UpgradeHandler<T>; type Config = ();
type Service = UpgradeHandler;
type InitError = Error; type InitError = Error;
type Future = Ready<Result<Self::Service, Self::InitError>>; type Future = Ready<Result<Self::Service, Self::InitError>>;
@ -25,17 +23,14 @@ impl<T> ServiceFactory for UpgradeHandler<T> {
} }
} }
impl<T> Service for UpgradeHandler<T> { impl<T> Service<(Request, Framed<T, Codec>)> for UpgradeHandler {
type Request = (Request, Framed<T, Codec>);
type Response = (); type Response = ();
type Error = Error; type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>; type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> { actix_service::always_ready!();
Poll::Ready(Ok(()))
}
fn call(&mut self, _: Self::Request) -> Self::Future { fn call(&self, _: (Request, Framed<T, Codec>)) -> Self::Future {
unimplemented!() ready(Ok(()))
} }
} }
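
With the request type now a trait parameter instead of an associated type, UpgradeHandler no longer needs to be generic over the stream type `T`, so the `PhantomData<T>` field disappears: one zero-sized type can implement the trait for every `(Request, Framed<T, Codec>)`. A toy illustration of that difference (the `Handle` trait below stands in for `Service` and is not part of the crate):

use std::marker::PhantomData;

// old shape: the handler itself had to name `T`, hence the PhantomData
struct OldHandler<T>(PhantomData<T>);

// new shape: one zero-sized handler covers every `T`
struct NewHandler;

trait Handle<Req> {
    fn handle(&self, req: Req);
}

impl<T> Handle<(&'static str, T)> for NewHandler {
    fn handle(&self, _req: (&'static str, T)) {}
}

fn main() {
    NewHandler.handle(("req", "tcp framed"));
    NewHandler.handle(("req", vec![0u8]));
    let _typed: OldHandler<Vec<u8>> = OldHandler(PhantomData);
}
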

View File

@ -9,12 +9,13 @@ use crate::error::Error;
use crate::h1::{Codec, Message}; use crate::h1::{Codec, Message};
use crate::response::Response; use crate::response::Response;
/// Send http/1 response /// Send HTTP/1 response
#[pin_project::pin_project] #[pin_project::pin_project]
pub struct SendResponse<T, B> { pub struct SendResponse<T, B> {
res: Option<Message<(Response<()>, BodySize)>>, res: Option<Message<(Response<()>, BodySize)>>,
#[pin] #[pin]
body: Option<ResponseBody<B>>, body: Option<ResponseBody<B>>,
#[pin]
framed: Option<Framed<T, Codec>>, framed: Option<Framed<T, Codec>>,
} }
@ -35,23 +36,30 @@ where
impl<T, B> Future for SendResponse<T, B> impl<T, B> Future for SendResponse<T, B>
where where
T: AsyncRead + AsyncWrite, T: AsyncRead + AsyncWrite + Unpin,
B: MessageBody + Unpin, B: MessageBody + Unpin,
{ {
type Output = Result<Framed<T, Codec>, Error>; type Output = Result<Framed<T, Codec>, Error>;
// TODO: rethink if we need loops in polls // TODO: rethink if we need loops in polls
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.project(); let mut this = self.as_mut().project();
let mut body_done = this.body.is_none(); let mut body_done = this.body.is_none();
loop { loop {
let mut body_ready = !body_done; let mut body_ready = !body_done;
let framed = this.framed.as_mut().unwrap();
// send body // send body
if this.res.is_none() && body_ready { if this.res.is_none() && body_ready {
while body_ready && !body_done && !framed.is_write_buf_full() { while body_ready
&& !body_done
&& !this
.framed
.as_ref()
.as_pin_ref()
.unwrap()
.is_write_buf_full()
{
match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx)? { match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx)? {
Poll::Ready(item) => { Poll::Ready(item) => {
// body is done when item is None // body is done when item is None
@ -59,6 +67,7 @@ where
if body_done { if body_done {
let _ = this.body.take(); let _ = this.body.take();
} }
let framed = this.framed.as_mut().as_pin_mut().unwrap();
framed.write(Message::Chunk(item))?; framed.write(Message::Chunk(item))?;
} }
Poll::Pending => body_ready = false, Poll::Pending => body_ready = false,
@ -66,6 +75,8 @@ where
} }
} }
let framed = this.framed.as_mut().as_pin_mut().unwrap();
// flush write buffer // flush write buffer
if !framed.is_write_buf_empty() { if !framed.is_write_buf_empty() {
match framed.flush(cx)? { match framed.flush(cx)? {
@ -96,6 +107,9 @@ where
break; break;
} }
} }
Poll::Ready(Ok(this.framed.take().unwrap()))
let framed = this.framed.take().unwrap();
Poll::Ready(Ok(framed))
} }
} }

View File

@ -1,63 +1,65 @@
use std::convert::TryFrom;
use std::future::Future; use std::future::Future;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::net; use std::net;
use std::pin::Pin; use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use std::{cmp, convert::TryFrom};
use actix_codec::{AsyncRead, AsyncWrite}; use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::{Delay, Instant}; use actix_rt::time::{Instant, Sleep};
use actix_service::Service; use actix_service::Service;
use bytes::{Bytes, BytesMut}; use bytes::{Bytes, BytesMut};
use futures_core::ready;
use h2::server::{Connection, SendResponse}; use h2::server::{Connection, SendResponse};
use h2::SendStream; use h2::SendStream;
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING}; use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
use log::{error, trace}; use log::{error, trace};
use crate::body::{BodySize, MessageBody, ResponseBody}; use crate::body::{BodySize, MessageBody, ResponseBody};
use crate::cloneable::CloneableService;
use crate::config::ServiceConfig; use crate::config::ServiceConfig;
use crate::error::{DispatchError, Error}; use crate::error::{DispatchError, Error};
use crate::helpers::DataFactory;
use crate::httpmessage::HttpMessage;
use crate::message::ResponseHead; use crate::message::ResponseHead;
use crate::payload::Payload; use crate::payload::Payload;
use crate::request::Request; use crate::request::Request;
use crate::response::Response; use crate::response::Response;
use crate::service::HttpFlow;
use crate::OnConnectData;
const CHUNK_SIZE: usize = 16_384; const CHUNK_SIZE: usize = 16_384;
/// Dispatcher for HTTP/2 protocol /// Dispatcher for HTTP/2 protocol.
#[pin_project::pin_project] #[pin_project::pin_project]
pub struct Dispatcher<T, S: Service<Request = Request>, B: MessageBody> pub struct Dispatcher<T, S, B, X, U>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request>,
B: MessageBody,
{ {
service: CloneableService<S>, flow: Rc<HttpFlow<S, X, U>>,
connection: Connection<T, Bytes>, connection: Connection<T, Bytes>,
on_connect: Option<Box<dyn DataFactory>>, on_connect_data: OnConnectData,
config: ServiceConfig, config: ServiceConfig,
peer_addr: Option<net::SocketAddr>, peer_addr: Option<net::SocketAddr>,
ka_expire: Instant, ka_expire: Instant,
ka_timer: Option<Delay>, ka_timer: Option<Sleep>,
_t: PhantomData<B>, _phantom: PhantomData<B>,
} }
impl<T, S, B> Dispatcher<T, S, B> impl<T, S, B, X, U> Dispatcher<T, S, B, X, U>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>, S: Service<Request>,
S::Error: Into<Error>, S::Error: Into<Error>,
// S::Future: 'static,
S::Response: Into<Response<B>>, S::Response: Into<Response<B>>,
B: MessageBody, B: MessageBody,
{ {
pub(crate) fn new( pub(crate) fn new(
service: CloneableService<S>, flow: Rc<HttpFlow<S, X, U>>,
connection: Connection<T, Bytes>, connection: Connection<T, Bytes>,
on_connect: Option<Box<dyn DataFactory>>, on_connect_data: OnConnectData,
config: ServiceConfig, config: ServiceConfig,
timeout: Option<Delay>, timeout: Option<Sleep>,
peer_addr: Option<net::SocketAddr>, peer_addr: Option<net::SocketAddr>,
) -> Self { ) -> Self {
// let keepalive = config.keep_alive_enabled(); // let keepalive = config.keep_alive_enabled();
@ -77,22 +79,22 @@ where
}; };
Dispatcher { Dispatcher {
service, flow,
config, config,
peer_addr, peer_addr,
connection, connection,
on_connect, on_connect_data,
ka_expire, ka_expire,
ka_timer, ka_timer,
_t: PhantomData, _phantom: PhantomData,
} }
} }
} }
impl<T, S, B> Future for Dispatcher<T, S, B> impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>, S: Service<Request>,
S::Error: Into<Error> + 'static, S::Error: Into<Error> + 'static,
S::Future: 'static, S::Future: 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
@ -105,10 +107,12 @@ where
let this = self.get_mut(); let this = self.get_mut();
loop { loop {
match Pin::new(&mut this.connection).poll_accept(cx) { match ready!(Pin::new(&mut this.connection).poll_accept(cx)) {
Poll::Ready(None) => return Poll::Ready(Ok(())), None => return Poll::Ready(Ok(())),
Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err.into())),
Poll::Ready(Some(Ok((req, res)))) => { Some(Err(err)) => return Poll::Ready(Err(err.into())),
Some(Ok((req, res))) => {
// update keep-alive expire // update keep-alive expire
if this.ka_timer.is_some() { if this.ka_timer.is_some() {
if let Some(expire) = this.config.keep_alive_expire() { if let Some(expire) = this.config.keep_alive_expire() {
@ -117,11 +121,9 @@ where
} }
let (parts, body) = req.into_parts(); let (parts, body) = req.into_parts();
let mut req = Request::with_payload(Payload::< let pl = crate::h2::Payload::new(body);
crate::payload::PayloadStream, let pl = Payload::<crate::payload::PayloadStream>::H2(pl);
>::H2( let mut req = Request::with_payload(pl);
crate::h2::Payload::new(body)
));
let head = &mut req.head_mut(); let head = &mut req.head_mut();
head.uri = parts.uri; head.uri = parts.uri;
@ -130,27 +132,21 @@ where
head.headers = parts.headers.into(); head.headers = parts.headers.into();
head.peer_addr = this.peer_addr; head.peer_addr = this.peer_addr;
// set on_connect data // merge on_connect_ext data into request extensions
if let Some(ref on_connect) = this.on_connect { this.on_connect_data.merge_into(&mut req);
on_connect.set(&mut req.extensions_mut());
}
actix_rt::spawn(ServiceResponse::< let svc = ServiceResponse::<S::Future, S::Response, S::Error, B> {
S::Future,
S::Response,
S::Error,
B,
> {
state: ServiceResponseState::ServiceCall( state: ServiceResponseState::ServiceCall(
this.service.call(req), this.flow.service.call(req),
Some(res), Some(res),
), ),
config: this.config.clone(), config: this.config.clone(),
buffer: None, buffer: None,
_t: PhantomData, _phantom: PhantomData,
}); };
actix_rt::spawn(svc);
} }
Poll::Pending => return Poll::Pending,
} }
} }
} }
@ -162,7 +158,7 @@ struct ServiceResponse<F, I, E, B> {
state: ServiceResponseState<F, B>, state: ServiceResponseState<F, B>,
config: ServiceConfig, config: ServiceConfig,
buffer: Option<Bytes>, buffer: Option<Bytes>,
_t: PhantomData<(I, E)>, _phantom: PhantomData<(I, E)>,
} }
#[pin_project::pin_project(project = ServiceResponseStateProj)] #[pin_project::pin_project(project = ServiceResponseStateProj)]
@ -199,8 +195,9 @@ where
skip_len = true; skip_len = true;
*size = BodySize::Stream; *size = BodySize::Stream;
} }
_ => (), _ => {}
} }
let _ = match size { let _ = match size {
BodySize::None | BodySize::Stream => None, BodySize::None | BodySize::Stream => None,
BodySize::Empty => res BodySize::Empty => res
@ -215,11 +212,13 @@ where
// copy headers // copy headers
for (key, value) in head.headers.iter() { for (key, value) in head.headers.iter() {
match *key { match *key {
CONNECTION | TRANSFER_ENCODING => continue, // http2 specific // omit HTTP/1 only headers
CONNECTION | TRANSFER_ENCODING => continue,
CONTENT_LENGTH if skip_len => continue, CONTENT_LENGTH if skip_len => continue,
DATE => has_date = true, DATE => has_date = true,
_ => (), _ => {}
} }
res.headers_mut().append(key, value.clone()); res.headers_mut().append(key, value.clone());
} }
@ -227,9 +226,11 @@ where
if !has_date { if !has_date {
let mut bytes = BytesMut::with_capacity(29); let mut bytes = BytesMut::with_capacity(29);
self.config.set_date_header(&mut bytes); self.config.set_date_header(&mut bytes);
res.headers_mut().insert(DATE, unsafe { res.headers_mut().insert(
HeaderValue::from_maybe_shared_unchecked(bytes.freeze()) DATE,
}); // SAFETY: serialized date-times are known ASCII strings
unsafe { HeaderValue::from_maybe_shared_unchecked(bytes.freeze()) },
);
} }
res res
@ -249,109 +250,121 @@ where
let mut this = self.as_mut().project(); let mut this = self.as_mut().project();
match this.state.project() { match this.state.project() {
ServiceResponseStateProj::ServiceCall(call, send) => match call.poll(cx) { ServiceResponseStateProj::ServiceCall(call, send) => {
Poll::Ready(Ok(res)) => { match ready!(call.poll(cx)) {
let (res, body) = res.into().replace_body(()); Ok(res) => {
let (res, body) = res.into().replace_body(());
let mut send = send.take().unwrap(); let mut send = send.take().unwrap();
let mut size = body.size(); let mut size = body.size();
let h2_res = self.as_mut().prepare_response(res.head(), &mut size); let h2_res =
this = self.as_mut().project(); self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = match send.send_response(h2_res, size.is_eof()) { let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => { Err(e) => {
trace!("Error sending h2 response: {:?}", e); trace!("Error sending HTTP/2 response: {:?}", e);
return Poll::Ready(()); return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
this.state
.set(ServiceResponseState::SendPayload(stream, body));
self.poll(cx)
} }
Ok(stream) => stream, }
};
if size.is_eof() { Err(e) => {
Poll::Ready(()) let res: Response = e.into().into();
} else { let (res, body) = res.replace_body(());
this.state
.set(ServiceResponseState::SendPayload(stream, body)); let mut send = send.take().unwrap();
self.poll(cx) let mut size = body.size();
let h2_res =
self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending HTTP/2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
this.state.set(ServiceResponseState::SendPayload(
stream,
body.into_body(),
));
self.poll(cx)
}
} }
} }
Poll::Pending => Poll::Pending, }
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
let mut send = send.take().unwrap(); ServiceResponseStateProj::SendPayload(ref mut stream, ref mut body) => {
let mut size = body.size();
let h2_res = self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
this.state.set(ServiceResponseState::SendPayload(
stream,
body.into_body(),
));
self.poll(cx)
}
}
},
ServiceResponseStateProj::SendPayload(ref mut stream, ref mut body) => loop {
loop { loop {
if let Some(ref mut buffer) = this.buffer { loop {
match stream.poll_capacity(cx) { match this.buffer {
Poll::Pending => return Poll::Pending, Some(ref mut buffer) => {
Poll::Ready(None) => return Poll::Ready(()), match ready!(stream.poll_capacity(cx)) {
Poll::Ready(Some(Ok(cap))) => { None => return Poll::Ready(()),
let len = buffer.len();
let bytes = buffer.split_to(std::cmp::min(cap, len));
if let Err(e) = stream.send_data(bytes, false) { Some(Ok(cap)) => {
warn!("{:?}", e); let len = buffer.len();
let bytes = buffer.split_to(cmp::min(cap, len));
if let Err(e) = stream.send_data(bytes, false) {
warn!("{:?}", e);
return Poll::Ready(());
} else if !buffer.is_empty() {
let cap = cmp::min(buffer.len(), CHUNK_SIZE);
stream.reserve_capacity(cap);
} else {
this.buffer.take();
}
}
Some(Err(e)) => {
warn!("{:?}", e);
return Poll::Ready(());
}
}
}
None => match ready!(body.as_mut().poll_next(cx)) {
None => {
if let Err(e) = stream.send_data(Bytes::new(), true)
{
warn!("{:?}", e);
}
return Poll::Ready(()); return Poll::Ready(());
} else if !buffer.is_empty() {
let cap = std::cmp::min(buffer.len(), CHUNK_SIZE);
stream.reserve_capacity(cap);
} else {
this.buffer.take();
} }
}
Poll::Ready(Some(Err(e))) => { Some(Ok(chunk)) => {
warn!("{:?}", e); stream.reserve_capacity(cmp::min(
return Poll::Ready(()); chunk.len(),
} CHUNK_SIZE,
} ));
} else { *this.buffer = Some(chunk);
match body.as_mut().poll_next(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => {
if let Err(e) = stream.send_data(Bytes::new(), true) {
warn!("{:?}", e);
} }
return Poll::Ready(());
} Some(Err(e)) => {
Poll::Ready(Some(Ok(chunk))) => { error!("Response payload stream error: {:?}", e);
stream.reserve_capacity(std::cmp::min( return Poll::Ready(());
chunk.len(), }
CHUNK_SIZE, },
));
*this.buffer = Some(chunk);
}
Poll::Ready(Some(Err(e))) => {
error!("Response payload stream error: {:?}", e);
return Poll::Ready(());
}
} }
} }
} }
}, }
} }
} }
} }

View File

@ -1,9 +1,12 @@
//! HTTP/2 implementation //! HTTP/2 implementation.
use std::pin::Pin;
use std::task::{Context, Poll}; use std::{
pin::Pin,
task::{Context, Poll},
};
use bytes::Bytes; use bytes::Bytes;
use futures_core::Stream; use futures_core::{ready, Stream};
use h2::RecvStream; use h2::RecvStream;
mod dispatcher; mod dispatcher;
@ -13,14 +16,14 @@ pub use self::dispatcher::Dispatcher;
pub use self::service::H2Service; pub use self::service::H2Service;
use crate::error::PayloadError; use crate::error::PayloadError;
/// H2 receive stream /// HTTP/2 peer stream.
pub struct Payload { pub struct Payload {
pl: RecvStream, stream: RecvStream,
} }
impl Payload { impl Payload {
pub(crate) fn new(pl: RecvStream) -> Self { pub(crate) fn new(stream: RecvStream) -> Self {
Self { pl } Self { stream }
} }
} }
@ -33,18 +36,17 @@ impl Stream for Payload {
) -> Poll<Option<Self::Item>> { ) -> Poll<Option<Self::Item>> {
let this = self.get_mut(); let this = self.get_mut();
match Pin::new(&mut this.pl).poll_data(cx) { match ready!(Pin::new(&mut this.stream).poll_data(cx)) {
Poll::Ready(Some(Ok(chunk))) => { Some(Ok(chunk)) => {
let len = chunk.len(); let len = chunk.len();
if let Err(err) = this.pl.flow_control().release_capacity(len) {
Poll::Ready(Some(Err(err.into()))) match this.stream.flow_control().release_capacity(len) {
} else { Ok(()) => Poll::Ready(Some(Ok(chunk))),
Poll::Ready(Some(Ok(chunk))) Err(err) => Poll::Ready(Some(Err(err.into()))),
} }
} }
Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(err.into()))), Some(Err(err)) => Poll::Ready(Some(Err(err.into()))),
Poll::Pending => Poll::Pending, None => Poll::Ready(None),
Poll::Ready(None) => Poll::Ready(None),
} }
} }
} }
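
Both this Stream impl and the HTTP/2 dispatcher above switch to the `ready!` macro, which returns `Poll::Pending` early so only the `Ready` variants are left to match. A minimal illustration of the pattern (assuming futures-core; `poll_inner` is a stand-in for something like `RecvStream::poll_data`):

use std::task::Poll;

use futures_core::ready;

// stand-in for an inner poll source
fn poll_inner() -> Poll<Option<Result<u32, ()>>> {
    Poll::Ready(Some(Ok(1)))
}

fn poll_next() -> Poll<Option<Result<u32, ()>>> {
    // `ready!(e)` is `match e { Poll::Ready(v) => v, Poll::Pending => return Poll::Pending }`
    match ready!(poll_inner()) {
        Some(Ok(chunk)) => Poll::Ready(Some(Ok(chunk))),
        Some(Err(err)) => Poll::Ready(Some(Err(err))),
        None => Poll::Ready(None),
    }
}

fn main() {
    assert!(matches!(poll_next(), Poll::Ready(Some(Ok(1)))));
}
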

View File

@ -2,7 +2,7 @@ use std::future::Future;
use std::marker::PhantomData; use std::marker::PhantomData;
use std::pin::Pin; use std::pin::Pin;
use std::task::{Context, Poll}; use std::task::{Context, Poll};
use std::{net, rc}; use std::{net, rc::Rc};
use actix_codec::{AsyncRead, AsyncWrite}; use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::net::TcpStream; use actix_rt::net::TcpStream;
@ -17,68 +17,65 @@ use h2::server::{self, Handshake};
use log::error; use log::error;
use crate::body::MessageBody; use crate::body::MessageBody;
use crate::cloneable::CloneableService;
use crate::config::ServiceConfig; use crate::config::ServiceConfig;
use crate::error::{DispatchError, Error}; use crate::error::{DispatchError, Error};
use crate::helpers::DataFactory;
use crate::request::Request; use crate::request::Request;
use crate::response::Response; use crate::response::Response;
use crate::service::HttpFlow;
use crate::{ConnectCallback, OnConnectData};
use super::dispatcher::Dispatcher; use super::dispatcher::Dispatcher;
/// `ServiceFactory` implementation for HTTP2 transport /// `ServiceFactory` implementation for HTTP/2 transport
pub struct H2Service<T, S, B> { pub struct H2Service<T, S, B> {
srv: S, srv: S,
cfg: ServiceConfig, cfg: ServiceConfig,
on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>, on_connect_ext: Option<Rc<ConnectCallback<T>>>,
_t: PhantomData<(T, B)>, _phantom: PhantomData<(T, B)>,
} }
impl<T, S, B> H2Service<T, S, B> impl<T, S, B> H2Service<T, S, B>
where where
S: ServiceFactory<Config = (), Request = Request>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error> + 'static, S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static, <S::Service as Service<Request>>::Future: 'static,
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
/// Create new `HttpService` instance with config. /// Create new `H2Service` instance with config.
pub(crate) fn with_config<F: IntoServiceFactory<S>>( pub(crate) fn with_config<F: IntoServiceFactory<S, Request>>(
cfg: ServiceConfig, cfg: ServiceConfig,
service: F, service: F,
) -> Self { ) -> Self {
H2Service { H2Service {
cfg, cfg,
on_connect: None, on_connect_ext: None,
srv: service.into_factory(), srv: service.into_factory(),
_t: PhantomData, _phantom: PhantomData,
} }
} }
/// Set on connect callback. /// Set on connect callback.
pub(crate) fn on_connect( pub(crate) fn on_connect_ext(mut self, f: Option<Rc<ConnectCallback<T>>>) -> Self {
mut self, self.on_connect_ext = f;
f: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
) -> Self {
self.on_connect = f;
self self
} }
} }
impl<S, B> H2Service<TcpStream, S, B> impl<S, B> H2Service<TcpStream, S, B>
where where
S: ServiceFactory<Config = (), Request = Request>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error> + 'static, S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static, <S::Service as Service<Request>>::Future: 'static,
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
/// Create simple tcp based service /// Create plain TCP based service
pub fn tcp( pub fn tcp(
self, self,
) -> impl ServiceFactory< ) -> impl ServiceFactory<
TcpStream,
Config = (), Config = (),
Request = TcpStream,
Response = (), Response = (),
Error = DispatchError, Error = DispatchError,
InitError = S::InitError, InitError = S::InitError,
@ -95,34 +92,34 @@ where
#[cfg(feature = "openssl")] #[cfg(feature = "openssl")]
mod openssl { mod openssl {
use actix_service::{fn_factory, fn_service}; use actix_service::{fn_factory, fn_service, ServiceFactoryExt};
use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream}; use actix_tls::accept::openssl::{Acceptor, SslAcceptor, SslError, SslStream};
use actix_tls::{openssl::HandshakeError, SslError}; use actix_tls::accept::TlsError;
use super::*; use super::*;
impl<S, B> H2Service<SslStream<TcpStream>, S, B> impl<S, B> H2Service<SslStream<TcpStream>, S, B>
where where
S: ServiceFactory<Config = (), Request = Request>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error> + 'static, S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static, <S::Service as Service<Request>>::Future: 'static,
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
/// Create ssl based service /// Create OpenSSL based service
pub fn openssl( pub fn openssl(
self, self,
acceptor: SslAcceptor, acceptor: SslAcceptor,
) -> impl ServiceFactory< ) -> impl ServiceFactory<
TcpStream,
Config = (), Config = (),
Request = TcpStream,
Response = (), Response = (),
Error = SslError<HandshakeError<TcpStream>, DispatchError>, Error = TlsError<SslError, DispatchError>,
InitError = S::InitError, InitError = S::InitError,
> { > {
pipeline_factory( pipeline_factory(
Acceptor::new(acceptor) Acceptor::new(acceptor)
.map_err(SslError::Ssl) .map_err(TlsError::Tls)
.map_init_err(|_| panic!()), .map_init_err(|_| panic!()),
) )
.and_then(fn_factory(|| { .and_then(fn_factory(|| {
@ -131,7 +128,7 @@ mod openssl {
ok((io, peer_addr)) ok((io, peer_addr))
})) }))
})) }))
.and_then(self.map_err(SslError::Service)) .and_then(self.map_err(TlsError::Service))
} }
} }
} }
@ -139,27 +136,28 @@ mod openssl {
#[cfg(feature = "rustls")] #[cfg(feature = "rustls")]
mod rustls { mod rustls {
use super::*; use super::*;
use actix_tls::rustls::{Acceptor, ServerConfig, TlsStream}; use actix_service::ServiceFactoryExt;
use actix_tls::SslError; use actix_tls::accept::rustls::{Acceptor, ServerConfig, TlsStream};
use actix_tls::accept::TlsError;
use std::io; use std::io;
impl<S, B> H2Service<TlsStream<TcpStream>, S, B> impl<S, B> H2Service<TlsStream<TcpStream>, S, B>
where where
S: ServiceFactory<Config = (), Request = Request>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error> + 'static, S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static, <S::Service as Service<Request>>::Future: 'static,
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
/// Create openssl based service /// Create Rustls based service
pub fn rustls( pub fn rustls(
self, self,
mut config: ServerConfig, mut config: ServerConfig,
) -> impl ServiceFactory< ) -> impl ServiceFactory<
TcpStream,
Config = (), Config = (),
Request = TcpStream,
Response = (), Response = (),
Error = SslError<io::Error, DispatchError>, Error = TlsError<io::Error, DispatchError>,
InitError = S::InitError, InitError = S::InitError,
> { > {
let protos = vec!["h2".to_string().into()]; let protos = vec!["h2".to_string().into()];
@ -167,7 +165,7 @@ mod rustls {
pipeline_factory( pipeline_factory(
Acceptor::new(config) Acceptor::new(config)
.map_err(SslError::Ssl) .map_err(TlsError::Tls)
.map_init_err(|_| panic!()), .map_init_err(|_| panic!()),
) )
.and_then(fn_factory(|| { .and_then(fn_factory(|| {
@ -176,55 +174,57 @@ mod rustls {
ok((io, peer_addr)) ok((io, peer_addr))
})) }))
})) }))
.and_then(self.map_err(SslError::Service)) .and_then(self.map_err(TlsError::Service))
} }
} }
} }
impl<T, S, B> ServiceFactory for H2Service<T, S, B> impl<T, S, B> ServiceFactory<(T, Option<net::SocketAddr>)> for H2Service<T, S, B>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Config = (), Request = Request>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error> + 'static, S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static, <S::Service as Service<Request>>::Future: 'static,
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
type Config = ();
type Request = (T, Option<net::SocketAddr>);
type Response = (); type Response = ();
type Error = DispatchError; type Error = DispatchError;
type InitError = S::InitError; type Config = ();
type Service = H2ServiceHandler<T, S::Service, B>; type Service = H2ServiceHandler<T, S::Service, B>;
type InitError = S::InitError;
type Future = H2ServiceResponse<T, S, B>; type Future = H2ServiceResponse<T, S, B>;
fn new_service(&self, _: ()) -> Self::Future { fn new_service(&self, _: ()) -> Self::Future {
H2ServiceResponse { H2ServiceResponse {
fut: self.srv.new_service(()), fut: self.srv.new_service(()),
cfg: Some(self.cfg.clone()), cfg: Some(self.cfg.clone()),
on_connect: self.on_connect.clone(), on_connect_ext: self.on_connect_ext.clone(),
_t: PhantomData, _phantom: PhantomData,
} }
} }
} }
#[doc(hidden)] #[doc(hidden)]
#[pin_project::pin_project] #[pin_project::pin_project]
pub struct H2ServiceResponse<T, S: ServiceFactory, B> { pub struct H2ServiceResponse<T, S, B>
where
S: ServiceFactory<Request>,
{
#[pin] #[pin]
fut: S::Future, fut: S::Future,
cfg: Option<ServiceConfig>, cfg: Option<ServiceConfig>,
on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>, on_connect_ext: Option<Rc<ConnectCallback<T>>>,
_t: PhantomData<(T, B)>, _phantom: PhantomData<B>,
} }
impl<T, S, B> Future for H2ServiceResponse<T, S, B> impl<T, S, B> Future for H2ServiceResponse<T, S, B>
where where
T: AsyncRead + AsyncWrite + Unpin, T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Config = (), Request = Request>, S: ServiceFactory<Request, Config = ()>,
S::Error: Into<Error> + 'static, S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static, S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static, <S::Service as Service<Request>>::Future: 'static,
B: MessageBody + 'static, B: MessageBody + 'static,
{ {
type Output = Result<H2ServiceHandler<T, S::Service, B>, S::InitError>; type Output = Result<H2ServiceHandler<T, S::Service, B>, S::InitError>;
@ -232,28 +232,31 @@ where
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> { fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project(); let this = self.as_mut().project();
-        Poll::Ready(ready!(this.fut.poll(cx)).map(|service| {
+        this.fut.poll(cx).map_ok(|service| {
            let this = self.as_mut().project();
            H2ServiceHandler::new(
                this.cfg.take().unwrap(),
-                this.on_connect.clone(),
+                this.on_connect_ext.clone(),
                service,
            )
-        }))
+        })
    }
}

/// `Service` implementation for http/2 transport
-pub struct H2ServiceHandler<T, S: Service, B> {
-    srv: CloneableService<S>,
+pub struct H2ServiceHandler<T, S, B>
+where
+    S: Service<Request>,
+{
+    flow: Rc<HttpFlow<S, (), ()>>,
    cfg: ServiceConfig,
-    on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
-    _t: PhantomData<(T, B)>,
+    on_connect_ext: Option<Rc<ConnectCallback<T>>>,
+    _phantom: PhantomData<B>,
}

impl<T, S, B> H2ServiceHandler<T, S, B>
where
-    S: Service<Request = Request>,
+    S: Service<Request>,
    S::Error: Into<Error> + 'static,
    S::Future: 'static,
    S::Response: Into<Response<B>> + 'static,
@@ -261,70 +264,66 @@ where
{
    fn new(
        cfg: ServiceConfig,
-        on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
-        srv: S,
+        on_connect_ext: Option<Rc<ConnectCallback<T>>>,
+        service: S,
    ) -> H2ServiceHandler<T, S, B> {
        H2ServiceHandler {
+            flow: HttpFlow::new(service, (), None),
            cfg,
-            on_connect,
-            srv: CloneableService::new(srv),
-            _t: PhantomData,
+            on_connect_ext,
+            _phantom: PhantomData,
        }
    }
}

-impl<T, S, B> Service for H2ServiceHandler<T, S, B>
+impl<T, S, B> Service<(T, Option<net::SocketAddr>)> for H2ServiceHandler<T, S, B>
where
    T: AsyncRead + AsyncWrite + Unpin,
-    S: Service<Request = Request>,
+    S: Service<Request>,
    S::Error: Into<Error> + 'static,
    S::Future: 'static,
    S::Response: Into<Response<B>> + 'static,
    B: MessageBody + 'static,
{
-    type Request = (T, Option<net::SocketAddr>);
    type Response = ();
    type Error = DispatchError;
    type Future = H2ServiceHandlerResponse<T, S, B>;

-    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
-        self.srv.poll_ready(cx).map_err(|e| {
+    fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
+        self.flow.service.poll_ready(cx).map_err(|e| {
            let e = e.into();
            error!("Service readiness error: {:?}", e);
            DispatchError::Service(e)
        })
    }

-    fn call(&mut self, (io, addr): Self::Request) -> Self::Future {
-        let on_connect = if let Some(ref on_connect) = self.on_connect {
-            Some(on_connect(&io))
-        } else {
-            None
-        };
+    fn call(&self, (io, addr): (T, Option<net::SocketAddr>)) -> Self::Future {
+        let on_connect_data =
+            OnConnectData::from_io(&io, self.on_connect_ext.as_deref());

        H2ServiceHandlerResponse {
            state: State::Handshake(
-                Some(self.srv.clone()),
+                Some(self.flow.clone()),
                Some(self.cfg.clone()),
                addr,
-                on_connect,
+                on_connect_data,
                server::handshake(io),
            ),
        }
    }
}

-enum State<T, S: Service<Request = Request>, B: MessageBody>
+enum State<T, S: Service<Request>, B: MessageBody>
where
    T: AsyncRead + AsyncWrite + Unpin,
    S::Future: 'static,
{
-    Incoming(Dispatcher<T, S, B>),
+    Incoming(Dispatcher<T, S, B, (), ()>),
    Handshake(
-        Option<CloneableService<S>>,
+        Option<Rc<HttpFlow<S, (), ()>>>,
        Option<ServiceConfig>,
        Option<net::SocketAddr>,
-        Option<Box<dyn DataFactory>>,
+        OnConnectData,
        Handshake<T, Bytes>,
    ),
}
@@ -332,7 +331,7 @@ where
pub struct H2ServiceHandlerResponse<T, S, B>
where
    T: AsyncRead + AsyncWrite + Unpin,
-    S: Service<Request = Request>,
+    S: Service<Request>,
    S::Error: Into<Error> + 'static,
    S::Future: 'static,
    S::Response: Into<Response<B>> + 'static,
@@ -344,7 +343,7 @@ where
impl<T, S, B> Future for H2ServiceHandlerResponse<T, S, B>
where
    T: AsyncRead + AsyncWrite + Unpin,
-    S: Service<Request = Request>,
+    S: Service<Request>,
    S::Error: Into<Error> + 'static,
    S::Future: 'static,
    S::Response: Into<Response<B>> + 'static,
@@ -359,25 +358,25 @@ where
                ref mut srv,
                ref mut config,
                ref peer_addr,
-                ref mut on_connect,
+                ref mut on_connect_data,
                ref mut handshake,
-            ) => match Pin::new(handshake).poll(cx) {
-                Poll::Ready(Ok(conn)) => {
+            ) => match ready!(Pin::new(handshake).poll(cx)) {
+                Ok(conn) => {
+                    let on_connect_data = std::mem::take(on_connect_data);
                    self.state = State::Incoming(Dispatcher::new(
                        srv.take().unwrap(),
                        conn,
-                        on_connect.take(),
+                        on_connect_data,
                        config.take().unwrap(),
                        None,
                        *peer_addr,
                    ));
                    self.poll(cx)
                }
-                Poll::Ready(Err(err)) => {
+                Err(err) => {
                    trace!("H2 handshake error: {}", err);
                    Poll::Ready(Err(err.into()))
                }
-                Poll::Pending => Poll::Pending,
            },
        }
    }
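
The handler now holds its service inside a shared `HttpFlow` and implements a `Service<Req>` trait whose `poll_ready` and `call` take `&self` rather than `&mut self`. A minimal stand-alone sketch of that call-by-shared-reference shape (the `RefService` trait and `Echo` type below are illustrations only, not the real actix-service API):

// Sketch only: a stand-in for the reference-taking Service trait targeted by
// this change; it is not the real actix_service::Service definition.
use std::task::{Context, Poll};

trait RefService<Req> {
    type Response;
    type Error;

    // `&self` instead of `&mut self`: the service can sit behind Rc and be
    // shared by the dispatcher, which is what Rc<HttpFlow<..>> relies on.
    fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>>;
    fn call(&self, req: Req) -> Result<Self::Response, Self::Error>;
}

struct Echo;

impl RefService<String> for Echo {
    type Response = String;
    type Error = ();

    fn poll_ready(&self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&self, req: String) -> Result<String, ()> {
        Ok(req)
    }
}

fn main() {
    let svc = Echo;
    assert_eq!(svc.call("hello".to_owned()), Ok("hello".to_owned()));
}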


@@ -0,0 +1,46 @@
//! Helper trait for types that can be effectively borrowed as a [HeaderName].
use std::{borrow::Cow, str::FromStr};
use http::header::{HeaderName, InvalidHeaderName};
pub trait AsHeaderName: Sealed {}
pub trait Sealed {
fn try_as_name(&self) -> Result<Cow<'_, HeaderName>, InvalidHeaderName>;
}
impl Sealed for HeaderName {
fn try_as_name(&self) -> Result<Cow<'_, HeaderName>, InvalidHeaderName> {
Ok(Cow::Borrowed(self))
}
}
impl AsHeaderName for HeaderName {}
impl Sealed for &HeaderName {
fn try_as_name(&self) -> Result<Cow<'_, HeaderName>, InvalidHeaderName> {
Ok(Cow::Borrowed(*self))
}
}
impl AsHeaderName for &HeaderName {}
impl Sealed for &str {
fn try_as_name(&self) -> Result<Cow<'_, HeaderName>, InvalidHeaderName> {
HeaderName::from_str(self).map(Cow::Owned)
}
}
impl AsHeaderName for &str {}
impl Sealed for String {
fn try_as_name(&self) -> Result<Cow<'_, HeaderName>, InvalidHeaderName> {
HeaderName::from_str(self).map(Cow::Owned)
}
}
impl AsHeaderName for String {}
impl Sealed for &String {
fn try_as_name(&self) -> Result<Cow<'_, HeaderName>, InvalidHeaderName> {
HeaderName::from_str(self).map(Cow::Owned)
}
}
impl AsHeaderName for &String {}
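
A short sketch of how a sealed name-conversion trait like the one above is typically consumed; the `get` helper and the plain `HashMap` here are illustrative only and assume the `http` crate:

// Illustrative only: a map keyed by HeaderName that accepts &str or HeaderName
// lookups through a sealed conversion trait like the one above.
use std::{borrow::Cow, collections::HashMap, str::FromStr};

use http::header::{HeaderName, HeaderValue, InvalidHeaderName};

trait Sealed {
    fn try_as_name(&self) -> Result<Cow<'_, HeaderName>, InvalidHeaderName>;
}

impl Sealed for HeaderName {
    fn try_as_name(&self) -> Result<Cow<'_, HeaderName>, InvalidHeaderName> {
        Ok(Cow::Borrowed(self))
    }
}

impl Sealed for &str {
    fn try_as_name(&self) -> Result<Cow<'_, HeaderName>, InvalidHeaderName> {
        HeaderName::from_str(self).map(Cow::Owned)
    }
}

// Hypothetical lookup helper, not part of the diff.
fn get<K: Sealed>(map: &HashMap<HeaderName, HeaderValue>, key: K) -> Option<&HeaderValue> {
    map.get(key.try_as_name().ok()?.as_ref())
}

fn main() {
    let mut map = HashMap::new();
    map.insert(http::header::ACCEPT, HeaderValue::from_static("*/*"));
    assert!(get(&map, "accept").is_some());
    assert!(get(&map, http::header::ACCEPT).is_some());
}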


@@ -1,3 +1,5 @@
+use std::cmp::Ordering;
+
use mime::Mime;

use crate::header::{qitem, QualityItem};
@@ -7,7 +9,7 @@ header! {
    /// `Accept` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-5.3.2)
    ///
    /// The `Accept` header field can be used by user agents to specify
    /// response media types that are acceptable. Accept header fields can
    /// be used to indicate that the request is specifically limited to a
    /// small set of desired types, as in the case of a request for an
    /// in-line image
@@ -30,50 +32,36 @@ header! {
    /// * `text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c`
    ///
    /// # Examples
-    /// ```rust
-    /// # extern crate actix_http;
-    /// extern crate mime;
+    /// ```
    /// use actix_http::Response;
    /// use actix_http::http::header::{Accept, qitem};
    ///
-    /// # fn main() {
    /// let mut builder = Response::Ok();
-    ///
-    /// builder.set(
+    /// builder.insert_header(
    ///     Accept(vec![
    ///         qitem(mime::TEXT_HTML),
    ///     ])
    /// );
-    /// # }
    /// ```
    ///
-    /// ```rust
-    /// # extern crate actix_http;
-    /// extern crate mime;
+    /// ```
    /// use actix_http::Response;
    /// use actix_http::http::header::{Accept, qitem};
    ///
-    /// # fn main() {
    /// let mut builder = Response::Ok();
-    ///
-    /// builder.set(
+    /// builder.insert_header(
    ///     Accept(vec![
    ///         qitem(mime::APPLICATION_JSON),
    ///     ])
    /// );
-    /// # }
    /// ```
    ///
-    /// ```rust
-    /// # extern crate actix_http;
-    /// extern crate mime;
+    /// ```
    /// use actix_http::Response;
    /// use actix_http::http::header::{Accept, QualityItem, q, qitem};
    ///
-    /// # fn main() {
    /// let mut builder = Response::Ok();
-    ///
-    /// builder.set(
+    /// builder.insert_header(
    ///     Accept(vec![
    ///         qitem(mime::TEXT_HTML),
    ///         qitem("application/xhtml+xml".parse().unwrap()),
@@ -88,7 +76,6 @@ header! {
    ///         ),
    ///     ])
    /// );
-    /// # }
    /// ```
    (Accept, header::ACCEPT) => (QualityItem<Mime>)+
@@ -97,14 +84,14 @@ header! {
    test_header!(
        test1,
        vec![b"audio/*; q=0.2, audio/basic"],
-        Some(HeaderField(vec![
+        Some(Accept(vec![
            QualityItem::new("audio/*".parse().unwrap(), q(200)),
            qitem("audio/basic".parse().unwrap()),
        ])));
    test_header!(
        test2,
        vec![b"text/plain; q=0.5, text/html, text/x-dvi; q=0.8, text/x-c"],
-        Some(HeaderField(vec![
+        Some(Accept(vec![
            QualityItem::new(mime::TEXT_PLAIN, q(500)),
            qitem(mime::TEXT_HTML),
            QualityItem::new(
@@ -130,7 +117,7 @@ header! {
    #[test]
    fn test_fuzzing1() {
        use crate::test::TestRequest;
-        let req = TestRequest::with_header(crate::header::ACCEPT, "chunk#;e").finish();
+        let req = TestRequest::default().insert_header((crate::header::ACCEPT, "chunk#;e")).finish();
        let header = Accept::parse(&req);
        assert!(header.is_ok());
    }
@@ -138,23 +125,148 @@ }
}

impl Accept {
-    /// A constructor to easily create `Accept: */*`.
+    /// Construct `Accept: */*`.
    pub fn star() -> Accept {
        Accept(vec![qitem(mime::STAR_STAR)])
    }

-    /// A constructor to easily create `Accept: application/json`.
+    /// Construct `Accept: application/json`.
    pub fn json() -> Accept {
        Accept(vec![qitem(mime::APPLICATION_JSON)])
    }

-    /// A constructor to easily create `Accept: text/*`.
+    /// Construct `Accept: text/*`.
    pub fn text() -> Accept {
        Accept(vec![qitem(mime::TEXT_STAR)])
    }

-    /// A constructor to easily create `Accept: image/*`.
+    /// Construct `Accept: image/*`.
    pub fn image() -> Accept {
        Accept(vec![qitem(mime::IMAGE_STAR)])
    }
/// Construct `Accept: text/html`.
pub fn html() -> Accept {
Accept(vec![qitem(mime::TEXT_HTML)])
}
/// Returns a sorted list of mime types from highest to lowest preference, accounting for
/// [q-factor weighting] and specificity.
///
/// [q-factor weighting]: https://tools.ietf.org/html/rfc7231#section-5.3.2
pub fn mime_precedence(&self) -> Vec<Mime> {
let mut types = self.0.clone();
// use stable sort so items with equal q-factor and specificity retain listed order
types.sort_by(|a, b| {
// sort by q-factor descending
b.quality.cmp(&a.quality).then_with(|| {
// use specificity rules on mime types with
// same q-factor (eg. text/html > text/* > */*)
// subtypes are not comparable if main type is star, so return early
match (a.item.type_(), b.item.type_()) {
(mime::STAR, mime::STAR) => return Ordering::Equal,
// a is sorted after b
(mime::STAR, _) => return Ordering::Greater,
// a is sorted before b
(_, mime::STAR) => return Ordering::Less,
_ => {}
}
// in both these match expressions, the returned ordering appears
// inverted because sort is high-to-low ("descending") precedence
match (a.item.subtype(), b.item.subtype()) {
(mime::STAR, mime::STAR) => Ordering::Equal,
// a is sorted after b
(mime::STAR, _) => Ordering::Greater,
// a is sorted before b
(_, mime::STAR) => Ordering::Less,
_ => Ordering::Equal,
}
})
});
types.into_iter().map(|qitem| qitem.item).collect()
}
/// Extracts the most preferable mime type, accounting for [q-factor weighting].
///
/// If no q-factors are provided, the first mime type is chosen. Note that items without
/// q-factors are given the maximum preference value.
///
/// Returns `None` if contained list is empty.
///
/// [q-factor weighting]: https://tools.ietf.org/html/rfc7231#section-5.3.2
pub fn mime_preference(&self) -> Option<Mime> {
let types = self.mime_precedence();
types.first().cloned()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::header::q;
#[test]
fn test_mime_precedence() {
let test = Accept(vec![]);
assert!(test.mime_precedence().is_empty());
let test = Accept(vec![qitem(mime::APPLICATION_JSON)]);
assert_eq!(test.mime_precedence(), vec!(mime::APPLICATION_JSON));
let test = Accept(vec![
qitem(mime::TEXT_HTML),
"application/xhtml+xml".parse().unwrap(),
QualityItem::new("application/xml".parse().unwrap(), q(0.9)),
QualityItem::new(mime::STAR_STAR, q(0.8)),
]);
assert_eq!(
test.mime_precedence(),
vec![
mime::TEXT_HTML,
"application/xhtml+xml".parse().unwrap(),
"application/xml".parse().unwrap(),
mime::STAR_STAR,
]
);
let test = Accept(vec![
qitem(mime::STAR_STAR),
qitem(mime::IMAGE_STAR),
qitem(mime::IMAGE_PNG),
]);
assert_eq!(
test.mime_precedence(),
vec![mime::IMAGE_PNG, mime::IMAGE_STAR, mime::STAR_STAR]
);
}
#[test]
fn test_mime_preference() {
let test = Accept(vec![
qitem(mime::TEXT_HTML),
"application/xhtml+xml".parse().unwrap(),
QualityItem::new("application/xml".parse().unwrap(), q(0.9)),
QualityItem::new(mime::STAR_STAR, q(0.8)),
]);
assert_eq!(test.mime_preference(), Some(mime::TEXT_HTML));
let test = Accept(vec![
QualityItem::new("video/*".parse().unwrap(), q(0.8)),
qitem(mime::IMAGE_PNG),
QualityItem::new(mime::STAR_STAR, q(0.5)),
qitem(mime::IMAGE_SVG),
QualityItem::new(mime::IMAGE_STAR, q(0.8)),
]);
assert_eq!(test.mime_preference(), Some(mime::IMAGE_PNG));
}
}
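
For reference, a usage sketch of the new precedence helpers, modelled directly on the tests above (assumes the `actix-http` and `mime` crates as used throughout this diff):

// Sketch based on the tests above: ordering follows q-factor first, then
// specificity (text/html > text/* > */*).
use actix_http::http::header::{q, qitem, Accept, QualityItem};

fn main() {
    let accept = Accept(vec![
        QualityItem::new(mime::STAR_STAR, q(0.8)),
        qitem(mime::TEXT_HTML),
        QualityItem::new("application/xml".parse().unwrap(), q(0.9)),
    ]);

    // Highest preference first: the implicit q=1.0 beats 0.9 and 0.8.
    assert_eq!(accept.mime_preference(), Some(mime::TEXT_HTML));
    assert_eq!(
        accept.mime_precedence(),
        vec![mime::TEXT_HTML, "application/xml".parse().unwrap(), mime::STAR_STAR]
    );
}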


@@ -21,44 +21,37 @@ header! {
    /// * `iso-8859-5, unicode-1-1;q=0.8`
    ///
    /// # Examples
-    /// ```rust
-    /// # extern crate actix_http;
+    /// ```
    /// use actix_http::Response;
    /// use actix_http::http::header::{AcceptCharset, Charset, qitem};
    ///
-    /// # fn main() {
    /// let mut builder = Response::Ok();
-    /// builder.set(
+    /// builder.insert_header(
    ///     AcceptCharset(vec![qitem(Charset::Us_Ascii)])
    /// );
-    /// # }
    /// ```
-    /// ```rust
-    /// # extern crate actix_http;
+    ///
+    /// ```
    /// use actix_http::Response;
    /// use actix_http::http::header::{AcceptCharset, Charset, q, QualityItem};
    ///
-    /// # fn main() {
    /// let mut builder = Response::Ok();
-    /// builder.set(
+    /// builder.insert_header(
    ///     AcceptCharset(vec![
    ///         QualityItem::new(Charset::Us_Ascii, q(900)),
    ///         QualityItem::new(Charset::Iso_8859_10, q(200)),
    ///     ])
    /// );
-    /// # }
    /// ```
-    /// ```rust
-    /// # extern crate actix_http;
+    ///
+    /// ```
    /// use actix_http::Response;
    /// use actix_http::http::header::{AcceptCharset, Charset, qitem};
    ///
-    /// # fn main() {
    /// let mut builder = Response::Ok();
-    /// builder.set(
+    /// builder.insert_header(
    ///     AcceptCharset(vec![qitem(Charset::Ext("utf-8".to_owned()))])
    /// );
-    /// # }
    /// ```
    (AcceptCharset, ACCEPT_CHARSET) => (QualityItem<Charset>)+


@@ -22,41 +22,35 @@ header! {
    ///
    /// # Examples
    ///
-    /// ```rust
-    /// # extern crate actix_http;
-    /// # extern crate language_tags;
+    /// ```
+    /// use language_tags::langtag;
    /// use actix_http::Response;
    /// use actix_http::http::header::{AcceptLanguage, LanguageTag, qitem};
    ///
-    /// # fn main() {
    /// let mut builder = Response::Ok();
    /// let mut langtag: LanguageTag = Default::default();
    /// langtag.language = Some("en".to_owned());
    /// langtag.region = Some("US".to_owned());
-    /// builder.set(
+    /// builder.insert_header(
    ///     AcceptLanguage(vec![
    ///         qitem(langtag),
    ///     ])
    /// );
-    /// # }
    /// ```
    ///
-    /// ```rust
-    /// # extern crate actix_http;
-    /// # #[macro_use] extern crate language_tags;
+    /// ```
+    /// use language_tags::langtag;
    /// use actix_http::Response;
    /// use actix_http::http::header::{AcceptLanguage, QualityItem, q, qitem};
-    /// #
-    /// # fn main() {
+    ///
    /// let mut builder = Response::Ok();
-    /// builder.set(
+    /// builder.insert_header(
    ///     AcceptLanguage(vec![
    ///         qitem(langtag!(da)),
    ///         QualityItem::new(langtag!(en;;;GB), q(800)),
    ///         QualityItem::new(langtag!(en), q(700)),
    ///     ])
    /// );
-    /// # }
    /// ```
    (AcceptLanguage, ACCEPT_LANGUAGE) => (QualityItem<LanguageTag>)+


@@ -1,5 +1,5 @@
-use http::Method;
 use http::header;
+use http::Method;

header! {
    /// `Allow` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-7.4.1)
@@ -22,38 +22,28 @@ header! {
    ///
    /// # Examples
    ///
-    /// ```rust
-    /// # extern crate http;
-    /// # extern crate actix_http;
+    /// ```
    /// use actix_http::Response;
-    /// use actix_http::http::header::Allow;
-    /// use http::Method;
+    /// use actix_http::http::{header::Allow, Method};
    ///
-    /// # fn main() {
    /// let mut builder = Response::Ok();
-    /// builder.set(
+    /// builder.insert_header(
    ///     Allow(vec![Method::GET])
    /// );
-    /// # }
    /// ```
    ///
-    /// ```rust
-    /// # extern crate http;
-    /// # extern crate actix_http;
+    /// ```
    /// use actix_http::Response;
-    /// use actix_http::http::header::Allow;
-    /// use http::Method;
+    /// use actix_http::http::{header::Allow, Method};
    ///
-    /// # fn main() {
    /// let mut builder = Response::Ok();
-    /// builder.set(
+    /// builder.insert_header(
    ///     Allow(vec![
    ///         Method::GET,
    ///         Method::POST,
    ///         Method::PATCH,
    ///     ])
    /// );
-    /// # }
    /// ```
    (Allow, header::ALLOW) => (Method)*


@@ -28,12 +28,12 @@ use crate::header::{
/// * `max-age=30`
///
/// # Examples
-/// ```rust
+/// ```
/// use actix_http::Response;
/// use actix_http::http::header::{CacheControl, CacheDirective};
///
/// let mut builder = Response::Ok();
-/// builder.set(CacheControl(vec![CacheDirective::MaxAge(86400u32)]));
+/// builder.insert_header(CacheControl(vec![CacheDirective::MaxAge(86400u32)]));
/// ```
///
/// ```rust
@@ -41,7 +41,7 @@ use crate::header::{
/// use actix_http::http::header::{CacheControl, CacheDirective};
///
/// let mut builder = Response::Ok();
-/// builder.set(CacheControl(vec![
+/// builder.insert_header(CacheControl(vec![
///     CacheDirective::NoCache,
///     CacheDirective::Private,
///     CacheDirective::MaxAge(360u32),
@@ -82,7 +82,7 @@ impl fmt::Display for CacheControl {
impl IntoHeaderValue for CacheControl {
    type Error = header::InvalidHeaderValue;

-    fn try_into(self) -> Result<header::HeaderValue, Self::Error> {
+    fn try_into_value(self) -> Result<header::HeaderValue, Self::Error> {
        let mut writer = Writer::new();
        let _ = write!(&mut writer, "{}", self);
        header::HeaderValue::from_maybe_shared(writer.take())
@@ -196,7 +196,8 @@ mod tests {
    #[test]
    fn test_parse_multiple_headers() {
-        let req = TestRequest::with_header(header::CACHE_CONTROL, "no-cache, private")
+        let req = TestRequest::default()
+            .insert_header((header::CACHE_CONTROL, "no-cache, private"))
            .finish();
        let cache = Header::parse(&req);
        assert_eq!(
@@ -210,9 +211,9 @@ mod tests {
    #[test]
    fn test_parse_argument() {
-        let req =
-            TestRequest::with_header(header::CACHE_CONTROL, "max-age=100, private")
+        let req = TestRequest::default()
+            .insert_header((header::CACHE_CONTROL, "max-age=100, private"))
            .finish();
        let cache = Header::parse(&req);
        assert_eq!(
            cache.ok(),
@@ -225,8 +226,9 @@ mod tests {
    #[test]
    fn test_parse_quote_form() {
-        let req =
-            TestRequest::with_header(header::CACHE_CONTROL, "max-age=\"200\"").finish();
+        let req = TestRequest::default()
+            .insert_header((header::CACHE_CONTROL, "max-age=\"200\""))
+            .finish();
        let cache = Header::parse(&req);
        assert_eq!(
            cache.ok(),
@@ -236,8 +238,9 @@ mod tests {
    #[test]
    fn test_parse_extension() {
-        let req =
-            TestRequest::with_header(header::CACHE_CONTROL, "foo, bar=baz").finish();
+        let req = TestRequest::default()
+            .insert_header((header::CACHE_CONTROL, "foo, bar=baz"))
+            .finish();
        let cache = Header::parse(&req);
        assert_eq!(
            cache.ok(),
@@ -250,7 +253,9 @@ mod tests {
    #[test]
    fn test_parse_bad_syntax() {
-        let req = TestRequest::with_header(header::CACHE_CONTROL, "foo=").finish();
+        let req = TestRequest::default()
+            .insert_header((header::CACHE_CONTROL, "foo="))
+            .finish();
        let cache: Result<CacheControl, _> = Header::parse(&req);
        assert_eq!(cache.ok(), None)
    }
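
The `try_into` to `try_into_value` rename recurs for every typed header in this diff. A stand-alone sketch of the same pattern with a hypothetical `Tag` type, using `HeaderValue::from_str` in place of the crate-internal `Writer`:

// Hypothetical Tag type demonstrating the renamed conversion method; the trait
// here is a local stand-in for IntoHeaderValue, not the real one.
use http::header::{HeaderValue, InvalidHeaderValue};

trait TryIntoHeaderValue {
    type Error;
    fn try_into_value(self) -> Result<HeaderValue, Self::Error>;
}

struct Tag(String);

impl TryIntoHeaderValue for Tag {
    type Error = InvalidHeaderValue;

    fn try_into_value(self) -> Result<HeaderValue, Self::Error> {
        // The real impls format into a Writer and call from_maybe_shared;
        // from_str is the simplest equivalent for a plain String.
        HeaderValue::from_str(&self.0)
    }
}

fn main() {
    let value = Tag("max-age=86400".to_owned()).try_into_value().unwrap();
    assert_eq!(value.to_str().unwrap(), "max-age=86400");
}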


@@ -1,10 +1,10 @@
-// # References
-//
-// "The Content-Disposition Header Field" https://www.ietf.org/rfc/rfc2183.txt
-// "The Content-Disposition Header Field in the Hypertext Transfer Protocol (HTTP)" https://www.ietf.org/rfc/rfc6266.txt
-// "Returning Values from Forms: multipart/form-data" https://www.ietf.org/rfc/rfc7578.txt
-// Browser conformance tests at: http://greenbytes.de/tech/tc2231/
-// IANA assignment: http://www.iana.org/assignments/cont-disp/cont-disp.xhtml
+//! # References
+//!
+//! "The Content-Disposition Header Field" https://www.ietf.org/rfc/rfc2183.txt
+//! "The Content-Disposition Header Field in the Hypertext Transfer Protocol (HTTP)" https://www.ietf.org/rfc/rfc6266.txt
+//! "Returning Values from Forms: multipart/form-data" https://www.ietf.org/rfc/rfc7578.txt
+//! Browser conformance tests at: http://greenbytes.de/tech/tc2231/
+//! IANA assignment: http://www.iana.org/assignments/cont-disp/cont-disp.xhtml

use lazy_static::lazy_static;
use regex::Regex;
@@ -283,11 +283,11 @@ impl DispositionParam {
///            Some("\u{1f600}.svg".as_bytes()));
/// ```
///
-/// # WARN
+/// # Security Note
+///
/// If "filename" parameter is supplied, do not use the file name blindly, check and possibly
/// change to match local file system conventions if applicable, and do not use directory path
-/// information that may be present. See [RFC2183](https://tools.ietf.org/html/rfc2183#section-2.3)
-/// .
+/// information that may be present. See [RFC2183](https://tools.ietf.org/html/rfc2183#section-2.3).
#[derive(Clone, Debug, PartialEq)]
pub struct ContentDisposition {
    /// The disposition type
@@ -318,9 +318,8 @@ impl ContentDisposition {
                    return Err(crate::error::ParseError::Header);
                }
                left = new_left;
-                if param_name.ends_with('*') {
+                if let Some(param_name) = param_name.strip_suffix('*') {
                    // extended parameters
-                    let param_name = &param_name[..param_name.len() - 1]; // trim asterisk
                    let (ext_value, new_left) = split_once_and_trim(left, ';');
                    left = new_left;
                    let ext_value = header::parse_extended_value(ext_value)?;
@@ -387,26 +386,17 @@ impl ContentDisposition {
    /// Returns `true` if it is [`Inline`](DispositionType::Inline).
    pub fn is_inline(&self) -> bool {
-        match self.disposition {
-            DispositionType::Inline => true,
-            _ => false,
-        }
+        matches!(self.disposition, DispositionType::Inline)
    }

    /// Returns `true` if it is [`Attachment`](DispositionType::Attachment).
    pub fn is_attachment(&self) -> bool {
-        match self.disposition {
-            DispositionType::Attachment => true,
-            _ => false,
-        }
+        matches!(self.disposition, DispositionType::Attachment)
    }

    /// Returns `true` if it is [`FormData`](DispositionType::FormData).
    pub fn is_form_data(&self) -> bool {
-        match self.disposition {
-            DispositionType::FormData => true,
-            _ => false,
-        }
+        matches!(self.disposition, DispositionType::FormData)
    }

    /// Returns `true` if it is [`Ext`](DispositionType::Ext) and the `disp_type` matches.
@@ -464,7 +454,7 @@ impl ContentDisposition {
impl IntoHeaderValue for ContentDisposition {
    type Error = header::InvalidHeaderValue;

-    fn try_into(self) -> Result<header::HeaderValue, Self::Error> {
+    fn try_into_value(self) -> Result<header::HeaderValue, Self::Error> {
        let mut writer = Writer::new();
        let _ = write!(&mut writer, "{}", self);
        header::HeaderValue::from_maybe_shared(writer.take())
@@ -559,8 +549,7 @@ impl fmt::Display for ContentDisposition {
        write!(f, "{}", self.disposition)?;
        self.parameters
            .iter()
-            .map(|param| write!(f, "; {}", param))
-            .collect()
+            .try_for_each(|param| write!(f, "; {}", param))
    }
}
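
The two clean-ups above generalize: `strip_suffix` replaces manual index slicing and `matches!` replaces boolean `match` arms. A tiny self-contained sketch:

// Self-contained sketch of the two refactors above.
enum Disposition {
    Inline,
    Attachment,
}

// `matches!` replaces the `match { X => true, _ => false }` form.
fn is_inline(d: &Disposition) -> bool {
    matches!(d, Disposition::Inline)
}

fn main() {
    // `strip_suffix` replaces `&name[..name.len() - 1]` guarded by `ends_with`.
    let param_name = "filename*";
    if let Some(name) = param_name.strip_suffix('*') {
        assert_eq!(name, "filename");
    }

    assert!(is_inline(&Disposition::Inline));
    assert!(!is_inline(&Disposition::Attachment));
}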


@@ -0,0 +1,106 @@
use std::{convert::Infallible, str::FromStr};
use http::header::InvalidHeaderValue;
use crate::{
error::ParseError,
header::{self, from_one_raw_str, Header, HeaderName, HeaderValue, IntoHeaderValue},
HttpMessage,
};
/// Represents a supported content encoding.
#[derive(Copy, Clone, PartialEq, Debug)]
pub enum ContentEncoding {
/// Automatically select encoding based on encoding negotiation.
Auto,
/// A format using the Brotli algorithm.
Br,
/// A format using the zlib structure with deflate algorithm.
Deflate,
/// Gzip algorithm.
Gzip,
/// Indicates the identity function (i.e. no compression, nor modification).
Identity,
}
impl ContentEncoding {
/// Is the content compressed?
#[inline]
pub fn is_compression(self) -> bool {
matches!(self, ContentEncoding::Identity | ContentEncoding::Auto)
}
/// Convert content encoding to string
#[inline]
pub fn as_str(self) -> &'static str {
match self {
ContentEncoding::Br => "br",
ContentEncoding::Gzip => "gzip",
ContentEncoding::Deflate => "deflate",
ContentEncoding::Identity | ContentEncoding::Auto => "identity",
}
}
/// Default Q-factor (quality) value.
#[inline]
pub fn quality(self) -> f64 {
match self {
ContentEncoding::Br => 1.1,
ContentEncoding::Gzip => 1.0,
ContentEncoding::Deflate => 0.9,
ContentEncoding::Identity | ContentEncoding::Auto => 0.1,
}
}
}
impl Default for ContentEncoding {
fn default() -> Self {
Self::Identity
}
}
impl FromStr for ContentEncoding {
type Err = Infallible;
fn from_str(val: &str) -> Result<Self, Self::Err> {
Ok(Self::from(val))
}
}
impl From<&str> for ContentEncoding {
fn from(val: &str) -> ContentEncoding {
let val = val.trim();
if val.eq_ignore_ascii_case("br") {
ContentEncoding::Br
} else if val.eq_ignore_ascii_case("gzip") {
ContentEncoding::Gzip
} else if val.eq_ignore_ascii_case("deflate") {
ContentEncoding::Deflate
} else {
ContentEncoding::default()
}
}
}
impl IntoHeaderValue for ContentEncoding {
type Error = InvalidHeaderValue;
fn try_into_value(self) -> Result<http::HeaderValue, Self::Error> {
Ok(HeaderValue::from_static(self.as_str()))
}
}
impl Header for ContentEncoding {
fn name() -> HeaderName {
header::CONTENT_ENCODING
}
fn parse<T: HttpMessage>(msg: &T) -> Result<Self, ParseError> {
from_one_raw_str(msg.headers().get(Self::name()))
}
}
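
A quick usage sketch of the new `ContentEncoding` header type, using only items defined in the file above (the import path is an assumption about where the module is re-exported; adjust as needed):

// Usage sketch: string conversions round-trip through the impls defined above.
use actix_http::http::header::ContentEncoding;

fn main() {
    // FromStr is infallible: unknown tokens fall back to the Identity default.
    let enc: ContentEncoding = "GZIP".parse().unwrap();
    assert_eq!(enc, ContentEncoding::Gzip);
    assert_eq!(enc.as_str(), "gzip");
    assert_eq!(ContentEncoding::from("zstd"), ContentEncoding::Identity);
}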


@@ -23,38 +23,31 @@ header! {
    ///
    /// # Examples
    ///
-    /// ```rust
-    /// # extern crate actix_http;
-    /// # #[macro_use] extern crate language_tags;
+    /// ```
+    /// use language_tags::langtag;
    /// use actix_http::Response;
-    /// # use actix_http::http::header::{ContentLanguage, qitem};
-    /// #
-    /// # fn main() {
+    /// use actix_http::http::header::{ContentLanguage, qitem};
+    ///
    /// let mut builder = Response::Ok();
-    /// builder.set(
+    /// builder.insert_header(
    ///     ContentLanguage(vec![
    ///         qitem(langtag!(en)),
    ///     ])
    /// );
-    /// # }
    /// ```
    ///
-    /// ```rust
-    /// # extern crate actix_http;
-    /// # #[macro_use] extern crate language_tags;
+    /// ```
+    /// use language_tags::langtag;
    /// use actix_http::Response;
-    /// # use actix_http::http::header::{ContentLanguage, qitem};
-    /// #
-    /// # fn main() {
+    /// use actix_http::http::header::{ContentLanguage, qitem};
    ///
    /// let mut builder = Response::Ok();
-    /// builder.set(
+    /// builder.insert_header(
    ///     ContentLanguage(vec![
    ///         qitem(langtag!(da)),
    ///         qitem(langtag!(en;;;GB)),
    ///     ])
    /// );
-    /// # }
    /// ```
    (ContentLanguage, CONTENT_LANGUAGE) => (QualityItem<LanguageTag>)+


@@ -200,7 +200,7 @@ impl Display for ContentRangeSpec {
impl IntoHeaderValue for ContentRangeSpec {
    type Error = InvalidHeaderValue;

-    fn try_into(self) -> Result<HeaderValue, Self::Error> {
+    fn try_into_value(self) -> Result<HeaderValue, Self::Error> {
        let mut writer = Writer::new();
        let _ = write!(&mut writer, "{}", self);
        HeaderValue::from_maybe_shared(writer.take())


@@ -30,31 +30,24 @@ header! {
    ///
    /// # Examples
    ///
-    /// ```rust
+    /// ```
    /// use actix_http::Response;
    /// use actix_http::http::header::ContentType;
    ///
-    /// # fn main() {
    /// let mut builder = Response::Ok();
-    /// builder.set(
+    /// builder.insert_header(
    ///     ContentType::json()
    /// );
-    /// # }
    /// ```
    ///
-    /// ```rust
-    /// # extern crate mime;
-    /// # extern crate actix_http;
-    /// use mime::TEXT_HTML;
+    /// ```
    /// use actix_http::Response;
    /// use actix_http::http::header::ContentType;
    ///
-    /// # fn main() {
    /// let mut builder = Response::Ok();
-    /// builder.set(
-    ///     ContentType(TEXT_HTML)
+    /// builder.insert_header(
+    ///     ContentType(mime::TEXT_HTML)
    /// );
-    /// # }
    /// ```
    (ContentType, CONTENT_TYPE) => [Mime]
@@ -99,6 +92,7 @@ impl ContentType {
    pub fn form_url_encoded() -> ContentType {
        ContentType(mime::APPLICATION_WWW_FORM_URLENCODED)
    }
+
    /// A constructor to easily create a `Content-Type: image/jpeg` header.
    #[inline]
    pub fn jpeg() -> ContentType {


@@ -19,13 +19,15 @@ header! {
    ///
    /// # Example
    ///
-    /// ```rust
+    /// ```
+    /// use std::time::SystemTime;
    /// use actix_http::Response;
    /// use actix_http::http::header::Date;
-    /// use std::time::SystemTime;
    ///
    /// let mut builder = Response::Ok();
-    /// builder.set(Date(SystemTime::now().into()));
+    /// builder.insert_header(
+    ///     Date(SystemTime::now().into())
+    /// );
    /// ```
    (Date, DATE) => [HttpDate]


@@ -27,20 +27,24 @@ header! {
    ///
    /// # Examples
    ///
-    /// ```rust
+    /// ```
    /// use actix_http::Response;
    /// use actix_http::http::header::{ETag, EntityTag};
    ///
    /// let mut builder = Response::Ok();
-    /// builder.set(ETag(EntityTag::new(false, "xyzzy".to_owned())));
+    /// builder.insert_header(
+    ///     ETag(EntityTag::new(false, "xyzzy".to_owned()))
+    /// );
    /// ```
    ///
-    /// ```rust
+    /// ```
    /// use actix_http::Response;
    /// use actix_http::http::header::{ETag, EntityTag};
    ///
    /// let mut builder = Response::Ok();
-    /// builder.set(ETag(EntityTag::new(true, "xyzzy".to_owned())));
+    /// builder.insert_header(
+    ///     ETag(EntityTag::new(true, "xyzzy".to_owned()))
+    /// );
    /// ```
    (ETag, ETAG) => [EntityTag]


@@ -21,14 +21,16 @@ header! {
    ///
    /// # Example
    ///
-    /// ```rust
+    /// ```
+    /// use std::time::{SystemTime, Duration};
    /// use actix_http::Response;
    /// use actix_http::http::header::Expires;
-    /// use std::time::{SystemTime, Duration};
    ///
    /// let mut builder = Response::Ok();
    /// let expiration = SystemTime::now() + Duration::from_secs(60 * 60 * 24);
-    /// builder.set(Expires(expiration.into()));
+    /// builder.insert_header(
+    ///     Expires(expiration.into())
+    /// );
    /// ```
    (Expires, EXPIRES) => [HttpDate]


@@ -29,20 +29,20 @@ header! {
    ///
    /// # Examples
    ///
-    /// ```rust
+    /// ```
    /// use actix_http::Response;
    /// use actix_http::http::header::IfMatch;
    ///
    /// let mut builder = Response::Ok();
-    /// builder.set(IfMatch::Any);
+    /// builder.insert_header(IfMatch::Any);
    /// ```
    ///
-    /// ```rust
+    /// ```
    /// use actix_http::Response;
    /// use actix_http::http::header::{IfMatch, EntityTag};
    ///
    /// let mut builder = Response::Ok();
-    /// builder.set(
+    /// builder.insert_header(
    ///     IfMatch::Items(vec![
    ///         EntityTag::new(false, "xyzzy".to_owned()),
    ///         EntityTag::new(false, "foobar".to_owned()),


@@ -21,14 +21,16 @@ header! {
    ///
    /// # Example
    ///
-    /// ```rust
+    /// ```
+    /// use std::time::{SystemTime, Duration};
    /// use actix_http::Response;
    /// use actix_http::http::header::IfModifiedSince;
-    /// use std::time::{SystemTime, Duration};
    ///
    /// let mut builder = Response::Ok();
    /// let modified = SystemTime::now() - Duration::from_secs(60 * 60 * 24);
-    /// builder.set(IfModifiedSince(modified.into()));
+    /// builder.insert_header(
+    ///     IfModifiedSince(modified.into())
+    /// );
    /// ```
    (IfModifiedSince, IF_MODIFIED_SINCE) => [HttpDate]


@@ -31,20 +31,20 @@ header! {
    ///
    /// # Examples
    ///
-    /// ```rust
+    /// ```
    /// use actix_http::Response;
    /// use actix_http::http::header::IfNoneMatch;
    ///
    /// let mut builder = Response::Ok();
-    /// builder.set(IfNoneMatch::Any);
+    /// builder.insert_header(IfNoneMatch::Any);
    /// ```
    ///
-    /// ```rust
+    /// ```
    /// use actix_http::Response;
    /// use actix_http::http::header::{IfNoneMatch, EntityTag};
    ///
    /// let mut builder = Response::Ok();
-    /// builder.set(
+    /// builder.insert_header(
    ///     IfNoneMatch::Items(vec![
    ///         EntityTag::new(false, "xyzzy".to_owned()),
    ///         EntityTag::new(false, "foobar".to_owned()),
@@ -73,13 +73,15 @@ mod tests {
    fn test_if_none_match() {
        let mut if_none_match: Result<IfNoneMatch, _>;

-        let req = TestRequest::with_header(IF_NONE_MATCH, "*").finish();
+        let req = TestRequest::default()
+            .insert_header((IF_NONE_MATCH, "*"))
+            .finish();
        if_none_match = Header::parse(&req);
        assert_eq!(if_none_match.ok(), Some(IfNoneMatch::Any));

-        let req =
-            TestRequest::with_header(IF_NONE_MATCH, &b"\"foobar\", W/\"weak-etag\""[..])
+        let req = TestRequest::default()
+            .insert_header((IF_NONE_MATCH, &b"\"foobar\", W/\"weak-etag\""[..]))
            .finish();
        if_none_match = Header::parse(&req);
        let mut entities: Vec<EntityTag> = Vec::new();
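
The test migrations across these files all follow the same shape; a condensed sketch of the new builder style, assuming `TestRequest` and its `insert_header` builder are available from `actix_http::test` as used in the tests above:

// Condensed sketch of the new test style used throughout this diff.
use actix_http::http::header::{self, Header, IfNoneMatch};
use actix_http::test::TestRequest;

fn main() {
    let req = TestRequest::default()
        .insert_header((header::IF_NONE_MATCH, "*"))
        .finish();

    let parsed = IfNoneMatch::parse(&req);
    assert_eq!(parsed.ok(), Some(IfNoneMatch::Any));
}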

Some files were not shown because too many files have changed in this diff.