mirror of https://github.com/fafhrd91/actix-web synced 2025-07-05 18:35:22 +02:00

912 Commits

SHA1 Message Date
64a2c13cdf the big three point oh (#1668) 2020-09-11 13:50:10 +01:00
bf53fe5a22 bump actix dependency to v0.10 (#1666) 2020-09-11 12:09:52 +01:00
cf5138e740 fix clippy async_yields_async lints (#1667) 2020-09-11 11:29:17 +01:00
121075c1ef awc: Rename Client::build to Client::builder (#1665) 2020-09-11 09:24:39 +01:00
22089aff87 Improve json, form and query extractor config docs (#1661) 2020-09-10 15:40:20 +01:00
7787638f26 fix CI clippy warnings (#1664) 2020-09-10 14:46:35 +01:00
2f6e9738c4 prepare multipart and actors releases (#1663) 2020-09-10 12:54:27 +01:00
e39d166a17 Fix examples hyperlink in README (#1660) 2020-09-10 00:12:50 +01:00
059d1671d7 prepare release beta 4 (#1659) 2020-09-09 22:14:11 +01:00
3a27580ebe awc: improve module documentation (#1656)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-09-09 14:24:12 +01:00
9d0534999d bump connect and tls versions (#1655) 2020-09-09 09:20:54 +01:00
c54d73e0bb Improve awc websocket docs (#1654)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-09-07 12:04:54 +01:00
9a9d4b182e document all remaining unsafe usages (#1642)
adds some debug assertions where appropriate
2020-09-03 10:00:24 +01:00
4e321595bc extract more config types from Data<T> as well (#1641) 2020-09-02 22:12:07 +01:00
01cbef700f Fix a small typo in a doc comment. (#1649) 2020-08-28 22:16:41 +01:00
8497b5f490 integrate with updated actix-{codec, utils} (#1634) 2020-08-24 10:13:35 +01:00
75d86a6beb Configurable trailing slash behaviour for NormalizePath (#1639)
Co-authored-by: ljoonal <ljoona@ljoonal.xyz>
2020-08-19 12:21:52 +01:00
3892a95c11 Fix actix-web version to publish 2020-08-18 01:16:18 +09:00
5802eb797f awc,web: Bump up to next beta releases (#1638) 2020-08-18 01:08:40 +09:00
ff2ca0f420 Update rustls to 0.18 (#1637) 2020-08-18 00:28:39 +09:00
59ad1738e9 web: Bump up to 3.0.0-beta.2 (#1636) 2020-08-17 11:32:38 +01:00
aa2bd6fbfb http: Bump up to 2.0.0-beta.3 (#1630) 2020-08-14 19:42:14 +09:00
5aad8e24c7 Re-export all error types from awc (#1621) 2020-08-14 01:24:35 +01:00
6e97bc09f8 Use action to upload docs 2020-08-13 16:04:50 +09:00
160995b8d4 fix awc pool leak (#1626) 2020-08-09 21:49:43 +01:00
187646b2f9 match HttpRequest app_data behavior in ServiceRequest (#1618) 2020-08-09 15:51:38 +01:00
46627be36f add dep graph dot graphs (#1601) 2020-08-09 13:54:35 +01:00
a78380739e require rustls feature for client example (#1625) 2020-08-09 13:32:37 +01:00
cf1c8abe62 prepare release http & awc (#1617) 2020-07-22 01:13:10 +01:00
92b5bcd13f Check format and tweak CI config (#1619) 2020-07-22 00:28:33 +01:00
701bdacfa2 Fix illegal chunked encoding (#1615)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-07-21 17:24:56 +01:00
6dc47c4093 fix soundness concern in h1 decoder (#1614)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-07-21 16:25:33 +01:00
0ec335a39c bump MSRV to 1.42 (#1616) 2020-07-21 16:40:30 +09:00
f8d5ad6b53 Make web::Path a tuple struct with a public inner value (#1594)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-07-21 00:54:26 +01:00
43c362779d also try extracting payload config as Data<T> (#1610) 2020-07-20 17:40:58 +01:00
971ba3eee1 fix continuous growth of app data in pooled requests (#1609)
fixes #1606
fixes #1607
2020-07-18 16:17:00 +01:00
2fd96c03e5 prepare beta.1 release for multipart/files/actors (#1605) 2020-07-16 11:38:57 +01:00
ad7c6d2633 prepare actix-web v3.0.0-beta.1 release (#1600) 2020-07-15 00:44:44 +01:00
3362a3d61b Merge pull request #1603 from JohnTitor/license
Avoid using deprecated `/` in license field
2020-07-14 15:25:48 +09:00
769ea6bd5b Merge pull request #1602 from actix/release/awc-beta-1
prepare awc v2.0.0-beta.1 release
2020-07-14 13:33:05 +09:00
1382094c15 Avoid using deprecated / in license field 2020-07-14 11:19:56 +09:00
78594a72bd prepare awc v2.0.0-beta.1 release 2020-07-14 03:16:26 +01:00
327e472e44 prepare http-2.0.0-beta.1 release (#1596) 2020-07-13 15:35:30 +01:00
e10eb648d9 Fix leaks with actix_http's client (#1580) 2020-07-10 22:35:22 +01:00
a2662b928b Update PULL_REQUEST_TEMPLATE.md 2020-07-10 14:55:56 +01:00
84583799be Merge pull request #1592 from JohnTitor/fix-tarpaulin
Use tarpaulin 0.13 to avoid failure
2020-07-07 05:11:09 +09:00
08f9a34075 Use tarpaulin 0.13 to avoid failure 2020-07-07 04:00:18 +09:00
056803d534 revamp readme and root doc page (#1590) 2020-07-05 01:16:53 +01:00
deab634247 actix-http: Update sha-1 to 0.9 (#1586) 2020-07-03 01:08:24 +01:00
0b641a2db2 Merge pull request #1585 from JohnTitor/v-htmlescape
Update `v_htmlescape` to 0.10
2020-07-03 07:46:26 +09:00
f2d641b772 Update v_htmlescape to 0.10 2020-07-02 17:52:42 +09:00
23c8191cca add method to extract matched resource name (#1577)
Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-06-27 16:22:16 +01:00
487f90be5b move pull request template (#1578) 2020-06-23 14:26:07 +09:00
fa28175a74 add method to extract matched resource pattern (#1566) 2020-06-23 00:58:20 +01:00
a70e599ff5 re-export rt in web and add main macro (#1575) 2020-06-22 20:09:48 +01:00
c11052f826 add PR template for bugs and features (#1570)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-06-22 18:28:06 +09:00
73ec01e83b Merge pull request #1576 from b4skyx/patch-1
Update benchmark url in README.md
2020-06-22 16:48:49 +09:00
a7c8533291 Update benchmark url in README.md
Updated benchmark url from r18 to the latest r19.
2020-06-22 04:14:48 +00:00
eb0eda69c6 migrate cookie handling to cookie crate (#1558) 2020-06-19 14:34:14 +01:00
dc74db1f2f re-export actix_rt::main macro (#1559) 2020-06-18 15:45:30 +01:00
0ba73fc44c exclude default -web features on -actors (#1562) 2020-06-18 11:23:36 +01:00
9af07d66ae Fix NormalizePath trailing slash behavior (#1548) 2020-06-17 10:54:20 +01:00
e72ee28232 Enforce HW_BUFFER_SIZE inside h1::dispatcher (#1550) 2020-06-17 08:58:23 +01:00
4f9a1ac3b7 Merge pull request #1553 from taiki-e/pin-project
Remove uses of pin_project::project attribute
2020-06-07 02:00:01 +09:00
6c5c4ea230 Remove uses of pin_project::project attribute
pin-project will deprecate the project attribute due to some unfixable
limitations.

Refs: https://github.com/taiki-e/pin-project/issues/225
2020-06-06 06:44:14 +09:00
a79450482c Merge pull request #1547 from JohnTitor/appveyor
Remove AppVeyor config
2020-06-03 10:43:01 +09:00
5286b8aed7 Remove AppVeyor config 2020-06-03 03:39:35 +09:00
621ebec01a Fix typo in timeout error display (#1552) 2020-06-02 18:04:49 +01:00
5e5c8b1c83 Merge pull request #1544 from JohnTitor/remove-framed
Remove actix-framed from workspace
2020-05-31 16:10:46 +09:00
322e7c15d1 Remove actix-framed from workspace 2020-05-31 05:50:32 +09:00
7aa757ad5a Merge pull request #1540 from JohnTitor/next-multipart
multipart: Bump up to 0.3.0-alpha.1
2020-05-25 22:04:07 +09:00
482f74e409 multipart: Bump up to 0.3.0-alpha.1 2020-05-25 19:12:20 +09:00
19967c41cc Merge pull request #1538 from JohnTitor/codegen-meta
Tweak codegen metadata
2020-05-25 17:37:58 +09:00
8a106c07b4 Tweak codegen metadata 2020-05-25 16:45:34 +09:00
11a9fdad95 Merge pull request #1535 from JohnTitor/next-files
files: Bump up to 0.3.0-alpha.1
2020-05-24 11:52:19 +09:00
75a34dc8bc files: Bump up to 0.3.0-alpha.1 2020-05-23 18:47:08 +09:00
5b60ee8cfd Merge pull request #1534 from JohnTitor/next-codegen
codegen: Bump up to 0.2.2
2020-05-23 18:08:36 +09:00
bb89d04080 codegen: Bump up to 0.2.2 2020-05-23 17:22:30 +09:00
f57b5659da Merge pull request #1533 from JohnTitor/next-test-server
http-test: Bump up to 2.0.0-alpha.1
2020-05-23 15:22:40 +09:00
4a955c425d Update actix-http-test dependency to 2.0.0-alpha.1 2020-05-23 12:14:17 +09:00
905f86b540 http-test: Bump up to 2.0.0-alpha.1 2020-05-23 12:13:43 +09:00
0348114ad8 Merge pull request #1532 from JohnTitor/issue-template
Add links to Gitter on the issue template
2020-05-23 12:11:59 +09:00
9c5a2d6580 Add links to Gitter on the issue template 2020-05-22 13:14:01 +09:00
2550f00702 Merge pull request #1530 from NickKolpinskiy/itoa
Use `itoa` in the content-length helper
2020-05-22 13:03:50 +09:00
7d8fb631a0 Use itoa in the content-length helper 2020-05-21 22:25:34 +03:00
b9e268e95f Merge pull request #1529 from JohnTitor/next-web
web: Bump up to 3.0.0-alpha.3
2020-05-21 19:32:24 +09:00
6dd78d9355 Run rustfmt 2020-05-21 17:56:53 +09:00
fe89ba7027 Update actix-web dependency to 3.0.0-alpha.3 2020-05-21 17:32:36 +09:00
5d39110470 web: Bump up to 3.0.0-alpha.3 2020-05-21 17:31:22 +09:00
5bde4e2529 Merge pull request #1528 from JohnTitor/next-awc
awc: Bump up to 2.0.0-alpha.2
2020-05-21 17:25:37 +09:00
9b72d33b79 Update awc to 2.0.0-alpha.2 2020-05-21 16:48:20 +09:00
0f826fd11a awc: Bump up to 2.0.0-alpha.2 2020-05-21 16:47:16 +09:00
cf92cfa777 Merge pull request #1527 from JohnTitor/next-http
http: Bump up to 2.0.0-alpha.4
2020-05-21 16:35:34 +09:00
9cfb32c780 Update actix-http to 2.0.0-alpha.4 2020-05-21 15:22:42 +09:00
48fa78e182 http: Bump up to 2.0.0-alpha.4 2020-05-21 15:22:07 +09:00
184683a698 Merge pull request #1525 from JohnTitor/deps
Update dependencies
2020-05-21 11:58:59 +09:00
6c78f57a70 test-server: Update dependencies 2020-05-21 09:52:15 +09:00
603973dede awc: Update base64 to 0.12 2020-05-21 09:51:58 +09:00
8391427905 http: Update base64 to 0.12 2020-05-21 09:51:32 +09:00
2314dc30b5 Merge pull request #1519 from JohnTitor/sample-size
bench: Reduce sample count to remove warning
2020-05-21 01:49:17 +09:00
864fc489ce CI: Reduce sample size 2020-05-20 22:33:25 +09:00
c48af0c822 Merge pull request #1522 from actix/only-server-bench
CI: Only run the server benchmark to avoid SIGKILL
2020-05-20 15:22:13 +09:00
50adbdecbe CI: Only run the server benchmark to avoid SIGKILL 2020-05-20 13:12:08 +09:00
f8f5a82f40 Merge pull request #1518 from JohnTitor/replace-net2
Replace deprecated `net2` crate with `socket2`
2020-05-19 10:25:22 +09:00
9a7f93610a web: Replace net2 crate with socket2 2020-05-19 09:34:37 +09:00
2dac9afc4e test-server: Replace net2 crate with socket2 2020-05-19 09:25:51 +09:00
74491dca59 Merge pull request #1515 from JohnTitor/minimize-futures
Minimize `futures` dependencies
2020-05-19 09:18:07 +09:00
81b0c32062 test-server: Minimize futures dependencies 2020-05-19 08:29:12 +09:00
a98e53ecb8 web: Minimize futures dependencies 2020-05-19 08:29:12 +09:00
d7abbff3b0 awc: Minimize futures dependencies 2020-05-19 08:29:12 +09:00
24372467d9 codegen: Minimize futures dependencies 2020-05-19 08:29:11 +09:00
fc8e07b947 actors: Minimize futures dependencies 2020-05-19 08:29:11 +09:00
ab4d8704f1 multipart: Minimize futures dependencies 2020-05-19 08:29:11 +09:00
292af145cb http: Minimize futures dependencies 2020-05-19 08:29:11 +09:00
9bd6407730 framed: Minimize futures dependencies 2020-05-19 08:24:34 +09:00
245dd471dd files: Minimize futures dependencies 2020-05-19 08:24:34 +09:00
32a37b7282 Remove content_length from ResponseBuilder (#1491)
* Remove content_length since it'll be overwritten by the response body. FIXES #1439

* Add setting of Content-Length to the no_chunking function

* Add changes and migration documentations

* Update MIGRATION.md

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>

Co-authored-by: Rob Ede <robjtede@icloud.com>
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-05-19 07:46:31 +09:00
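
A usage sketch of the behaviour described above; the `no_chunking(length)` signature is assumed from the change notes and may differ between versions:

    use actix_web::HttpResponse;

    // Sketch: instead of a separate content_length() call, pass the payload
    // length to no_chunking(), which disables chunked transfer encoding and
    // sets Content-Length itself (signature assumed from the notes above).
    fn plain_response() -> HttpResponse {
        let body = "hello world";
        HttpResponse::Ok()
            .no_chunking(body.len() as u64)
            .body(body)
    }
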
426a9b5d4d Merge pull request #1514 from actix/remove/sized64
remove needless BodySize::Sized64 variant
2020-05-18 10:40:01 +09:00
7e8ea44d5c remove needless BodySize::Sized64 variant 2020-05-18 00:42:51 +01:00
b0866a8a0f Actix-files will always send SizedStream (#1496)
* Fixes #1384

* There is no need to set no_chunking

* Test content-length for static files

* Update the tests

* Add Changelog

* Try to simply fix Windows test issues!

Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-05-18 06:54:42 +09:00
7fe426f626 Merge pull request #1512 from JohnTitor/remove-outdated-members
Remove outdated members
2020-05-17 12:30:03 +09:00
433a4563cf Remove outdated members 2020-05-17 10:56:06 +09:00
f3b0233477 use mem::take where possible (#1507) 2020-05-17 10:54:42 +09:00
201090d7a2 Provide impl<T> From<Arc<T>> for Data<T> (#1509) 2020-05-16 00:27:03 +01:00
4fc99d4a6f Fix audit issue logging by default peer address (#1485)
* Fix audit issue logging by default peer address

By default the log format includes a remote address that is taken from request headers.
This is trivial to spoof, which makes the log untrusted.

Changing the default log format value `%a` to the peer address means we always get
this trusted data. The header-based remote address option is maintained and
relegated to the `%{r}a` value.

Related  kanidm/kanidm#191.

* Rename peer/remote to remote_addr/realip_remote_addr

Change names to avoid naming confusion. I chose these names according to the Nginx
variables and
[ngx_http_realip_module](https://nginx.org/en/docs/http/ngx_http_realip_module.html).

Add more specific documentation about security concerns of using Real IP
in logger.

* Rename security advertise header in doc

* Add fix for audit issue logging by default peer address to changelog

Co-authored-by: Rob Ede <robjtede@icloud.com>
2020-05-15 09:07:27 +09:00
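
A minimal sketch of the distinction described above, using the Logger middleware format strings (`%a` for the trusted peer address, `%{r}a` for the header-derived real IP):

    use actix_web::{middleware::Logger, App};

    fn app() {
        // %a logs the peer (socket) address, which cannot be spoofed via headers;
        // %{r}a logs the header-derived "real IP" and should only be trusted
        // behind a reverse proxy that sets those headers.
        let _app = App::new()
            .wrap(Logger::new(r#"%a "%r" %s %b %{r}a"#));
    }
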
92ce975d87 Merge pull request #1506 from actix/chore/bump-140
bump msrv to 1.40
2020-05-13 23:46:13 +09:00
996f1d7eae bump msrv in ci and readme 2020-05-13 01:57:37 +01:00
63864ecf9e support parsing of SameSite=None (#1503) 2020-05-12 17:48:35 +01:00
bbd4d19830 Merge pull request #1486 from actix/feat/data-cascade
allow parent data containers to be accessed from child scopes
2020-05-09 09:40:25 +09:00
879cad9422 allow parent data containers to be accessed from child scopes 2020-05-09 00:31:26 +01:00
6e8ff5c905 Merge pull request #1495 from JohnTitor/new-web
actix-web: Bump up to 3.0.0-alpha.2
2020-05-08 07:28:18 +09:00
b66c3083a5 Update the actix-web dependency to 3.0.0-alpha.2 2020-05-08 06:46:42 +09:00
b6b3481c6f web: Bump up to 3.0.0-alpha.2 2020-05-08 06:46:13 +09:00
574714d156 Merge pull request #1494 from JohnTitor/new-actors
actors: Bump up to 3.0.0-alpha.1
2020-05-08 06:17:15 +09:00
54abf356d4 actors: Bump up to 3.0.0-alpha.1 2020-05-08 03:33:29 +09:00
9cb3b0ef58 Merge pull request #1493 from JohnTitor/http-next
http: Bump up to 2.0.0-alpha.3
2020-05-08 03:09:52 +09:00
9d0c80b6ce Update actix-http deps 2020-05-08 02:35:45 +09:00
0bc4a5e703 http: Bump up to 2.0.0-alpha.3 2020-05-08 02:35:45 +09:00
9d94fb91b2 correct spelling of ConnectError::Unresolved (#1487)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-05-08 02:26:48 +09:00
9164ed1f0c add resource middleware on actix-web-codegen (#1467)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-05-07 18:31:12 +09:00
b521e9b221 conditional test compilation [range, charset] (#1483)
* conditionally compile range and charset tests

* remove deprecated try macros

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-05-03 22:33:29 +09:00
f37cb6dd0b refactor h1 status line helper to remove unsafe usage (#1484)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-05-03 17:37:40 +09:00
d5ceae2074 Replace deprecated now with now_utc (#1481)
* Replace deprecated now with now_utc

* Update doctest
2020-05-02 10:14:50 +01:00
c27d3fad8e clarify resource/scope app data overriding (#1476)
* relocate FnDataFactory

* clarify app data overriding in Scope and Resource

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-04-30 02:20:47 +09:00
bb17280f51 simplify data factory future polling (#1473)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-04-29 15:38:53 +09:00
b047413b39 Small ws codec fix (#1465)
* Small ws codec fix

* Update actix-http/Changes.md

Co-authored-by: Huston Bokinsky <huston@deepgram.com>
2020-04-29 11:13:09 +09:00
ce24630d31 Fix typos in MIGRATION.md (#1470)
* Fix typos in MIGRATION.md

Those are `crate` not `create`

* Update MIGRATION.md
2020-04-23 03:39:09 +09:00
751253f23e Merge pull request #1463 from actix/fix/doc-typos
fix spelling errors in doc comments
2020-04-21 13:47:03 +09:00
5b0f7fff69 fix spelling errors in doc comments 2020-04-21 04:09:35 +01:00
54619cb680 actix-http: Remove failure support (#1449) 2020-04-16 06:54:34 +09:00
5b36381cb0 Merge pull request #1452 from actix/fix/default-service-data
set data container on default service calls
2020-04-16 06:01:56 +09:00
45e2e40140 set data container on default service calls
closes #1450
2020-04-14 02:33:19 +01:00
df3f722589 Merge pull request #1451 from actix/cache
Remove cache config from GHA workflows
2020-04-13 06:06:45 +09:00
e7ba871bbf Remove cache config from GHA workflows 2020-04-13 03:42:44 +09:00
ebc2e67015 Merge pull request #1442 from JohnTitor/workspace-doc
Deploy all the workspace crates' docs
2020-04-09 00:48:08 +09:00
74ddc852c8 Tweak README 2020-04-08 04:48:01 +09:00
dfaa330a94 Deploy all the workspace crates' docs 2020-04-08 04:42:38 +09:00
0ad02ee0e0 Add convenience functions for testing (#1401)
* Add convenience functions for testing

* Fix remarks from PR and add tests

* Add unpin to read_json_body

* Update changelog
2020-04-06 04:12:44 +09:00
aaff68bf05 Change NormalizePath to append trailing slash (#1433)
* Change NormalizePath to append trailing slash

* add tests

* Update CHANGES.md

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-04-05 03:26:40 +09:00
fcb1dec235 Merge pull request #1431 from OSSystems/topic/explicit-features-requirements
Add explicit feature requirements for examples and tests
2020-03-28 10:58:00 +09:00
7b7daa75a4 Add explicit feature requirements for examples and tests
This allows us to build 'actix-web' without default features and run all
tests.

Signed-off-by: Otavio Salvador <otavio@ossystems.com.br>
2020-03-25 23:49:24 -03:00
2067331884 Refactor actix-codegen duplicate code (#1423)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-03-20 04:40:42 +09:00
bf630d9475 Merge pull request #1422 from OSSystems/topic/impl-error-more-types
Implement `std::error::Error` for our custom errors
2020-03-19 05:05:57 +09:00
146ae4da18 Implement std::error::Error for our custom errors
To allow more ergonomic use and better integration with the
ecosystem, this adds the `std::error::Error` `impl` for our custom
errors.

We intend to drop this hand-made code once `derive_more` finishes
adding Error derive support[1]. Until that is available, we
need to live with it.

1. https://github.com/JelteF/derive_more/issues/92

Signed-off-by: Otavio Salvador <otavio@ossystems.com.br>
2020-03-18 00:22:18 -03:00
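
A minimal sketch of the kind of hand-made impl described above (the error type here is illustrative, not one of the actual crate errors):

    use std::fmt;

    #[derive(Debug)]
    struct PayloadOverflow;

    impl fmt::Display for PayloadOverflow {
        fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
            f.write_str("payload size exceeds the configured limit")
        }
    }

    // The hand-made impl to be dropped once derive_more gains an Error derive.
    impl std::error::Error for PayloadOverflow {}
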
52c5755d56 Merge pull request #1421 from actix/JohnTitor-patch-1
Upload coverage on PRs
2020-03-18 06:16:41 +09:00
5548c57a09 Upload coverage on PRs 2020-03-18 05:04:30 +09:00
0d958fabd7 📝 Improve the code example for JsonConfig (#1418)
* 📝 Improve the code example for JsonConfig

* Remove a redundant comment
2020-03-17 08:23:54 +09:00
c67e4c1fe8 Refactored macros (#1333)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-03-15 07:23:28 +09:00
4875dfbec7 Merge pull request #1416 from JohnTitor/fix-doc
Fix `read_body` doc
2020-03-13 07:06:57 +09:00
d602a7e386 Fix read_body doc 2020-03-13 05:52:58 +09:00
9f196fe5a5 Merge pull request #1413 from OSSystems/topic/fix-warnings
Fix clippy warnings
2020-03-12 16:24:44 +09:00
e4adcd1935 Merge pull request #1411 from JohnTitor/patch
Clean-up metadata
2020-03-12 16:21:34 +09:00
7e0d898d5a Fix clippy warnings
Signed-off-by: Otavio Salvador <otavio@ossystems.com.br>
2020-03-12 00:52:21 -03:00
51518721e5 Add notes to READMEs 2020-03-12 07:57:38 +09:00
c02d3e205b Clean-up metadata 2020-03-12 07:57:20 +09:00
a253d7d3ce Merge pull request #1410 from JohnTitor/new-web
Release actix-web 3.0.0-alpha.1
2020-03-12 05:12:54 +09:00
0152cedc5d Update changelog 2020-03-12 03:24:15 +09:00
a6a47b95ee Exclude actix-cors 2020-03-12 03:04:31 +09:00
1b28a5d48b Update actix-web dependency to 3.0.0-alpha.1 2020-03-12 03:03:50 +09:00
c147b94832 Bump up to 3.0.0-alpha.1 2020-03-12 03:03:22 +09:00
95f9a12a5e dev-deps: Update env_logger to 0.7 2020-03-12 02:58:22 +09:00
73eeab0e90 Merge pull request #1391 from JohnTitor/new-awc
Release awc v2.0.0-alpha.1
2020-03-11 21:15:48 +09:00
ce1e996b41 Update release date 2020-03-11 20:42:45 +09:00
e718f65121 Update tests 2020-03-08 16:42:45 +09:00
a9a475d555 Make test_server async fn 2020-03-08 16:42:26 +09:00
b93e1555ec Update actix-connect to 2.0.0-alpha.2 2020-03-08 15:27:40 +09:00
6f33b7ea42 Update awc dependency 2020-03-08 15:27:40 +09:00
294523a32f Bump up to 2.0.0-alpha.1 2020-03-08 15:27:39 +09:00
6b626c7d77 dev-deps: Update env_logger to 0.7 2020-03-08 03:07:53 +09:00
5da9e277a2 Merge pull request #1399 from JohnTitor/new-http
Release actix-http 2.0.0-alpha.2
2020-03-08 01:47:40 +09:00
0d5646a8b6 Run rustfmt 2020-03-08 00:52:39 +09:00
7941594f94 Update actix-http dependency 2020-03-08 00:50:20 +09:00
6f63acaf01 Bump up to 2.0.0-alpha.2 2020-03-08 00:48:45 +09:00
7172885beb Update changelog 2020-03-08 00:43:17 +09:00
cf721c5fff Update README example 2020-03-08 00:43:01 +09:00
10e3e72595 Http2 client configuration to improve performance (#1394)
* add defaults for http2 client configuration

* fix spaces

* Add changes text for extended H2 defaults buffers

* client: configurable H2 window sizes and max_http_version

* add H2 window size configuration and max_http_version to awc::ClientBuilder

* add awc::ClientBuilder H2 window sizes and max_http_version

* add test for H2 window size settings

* cleanup comment

* Apply code review fixes

* Code review fix for awc ClientBuilder

* Remove unnecessary comments on code review

* pin quote version to resolve build issue

* max_http_version to accept http::Version

* revert fix for quote broken build
2020-03-07 11:09:31 +09:00
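
A sketch of the new knobs on `awc::ClientBuilder` described above; the exact method names and argument types are assumed from the change notes:

    use awc::http::Version;
    use awc::Client;

    fn client() -> Client {
        Client::build()
            // per-stream and per-connection H2 window sizes (values illustrative)
            .initial_window_size(64 * 1024)
            .initial_connection_window_size(1024 * 1024)
            // cap protocol negotiation, e.g. force HTTP/1.1
            .max_http_version(Version::HTTP_11)
            .finish()
    }
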
a7d805aab7 Merge pull request #1396 from Aaron1011/fix/reapply-dispatcher
Re-apply commit 2cf7b3ad20
2020-03-05 02:48:20 +09:00
e90950fee1 Re-apply commit 2cf7b3ad20
This ended up getting reverted by #1367, which re-introduced an unsound
use of `Pin::new_unchecked`

See my original PR #1374 for the reasoning behind this change.
2020-03-04 11:27:58 -05:00
c8f0672ef7 Merge pull request #1395 from JohnTitor/rustls
Update `rustls` to 0.17
2020-03-04 15:56:27 +09:00
9d661dc4f3 Update changelog 2020-03-04 15:20:14 +09:00
687dc609dd Update rustls to 0.17 2020-03-04 15:11:31 +09:00
b9b52079e0 Update actix-tls to 2.0.0-alpha.1 2020-03-04 15:10:23 +09:00
117d28f7ba Update actix-connect to 2.0.0-alpha.1 2020-03-04 15:09:31 +09:00
795a575fc5 Merge pull request #1386 from JohnTitor/deny-to-warn
Demote lint level to warn
2020-02-28 14:17:11 +09:00
b4d63667df Demote lint level to warn 2020-02-27 22:39:11 +09:00
3dc859af58 Fix missing std::error::Error implement for MultipartError. (#1382)
* Fix missing `std::error::Error` implement for `MultipartError`.

* Update actix-multipart CHANGES.md.
2020-02-27 22:34:06 +09:00
1fa02b5f1c Merge pull request #1385 from JohnTitor/http-2-alpha
Release actix-http 2.0.0-alpha.1
2020-02-27 14:47:32 +09:00
c9fdcc596d Update actix to 0.10.0-alpha.1 2020-02-27 12:46:29 +09:00
6cc83dbb67 Allow clippy lint for compatibility 2020-02-27 12:45:11 +09:00
3b675c9125 Update actix-http to 2.0.0-alpha.1 2020-02-27 12:39:04 +09:00
15a2587887 Bump up to 2.0.0-alpha.1 2020-02-27 12:39:04 +09:00
0173f99726 Update changelog 2020-02-27 12:39:04 +09:00
f27dd19093 Fix Clippy warnings 2020-02-27 12:39:04 +09:00
7ba14fd113 Run rustfmt 2020-02-27 11:10:55 +09:00
903ae47baa dev-deps: Update env_logger to 0.7 2020-02-27 11:08:45 +09:00
95c18dbdf3 Merge pull request #1367 from actix/msg-body
Merge `MessageBody` improvements
2020-02-27 10:42:14 +09:00
d3ccf46e92 Clean-up metadata 2020-02-27 09:53:27 +09:00
cd1765035c Avoid re-definition 2020-02-27 09:42:32 +09:00
ea28219d0f reenable actix-http test-ws 2020-02-27 09:42:32 +09:00
77058ef779 adopt MessageBody Pin changes to actix-web root 2020-02-27 09:42:32 +09:00
e5f2feec45 reenable actix-http from local path 2020-02-27 09:42:32 +09:00
0a86907dd2 use mem::replace instead of mem::take rust 1.40+ 2020-02-27 09:37:05 +09:00
78749a4b7e rollback actix-http version change 2020-02-27 09:37:05 +09:00
de815dd99c Fixed condition for finishing transfer of response 2020-02-27 09:37:05 +09:00
e6078bf792 Fix EncoderBody enum to align with Body::Message 2020-02-27 09:37:05 +09:00
a84b37199a Add Unpin to Body to get rid of unsafe in MessageBody 2020-02-27 09:37:05 +09:00
c05f9475c5 refactor dispatcher to avoid possible UB with DispatcherState Pin 2020-02-27 09:37:05 +09:00
69dab0063c Get rid of one more unsafe 2020-02-27 09:37:05 +09:00
ec5c779732 unlink MessageBody from Unpin 2020-02-27 09:37:05 +09:00
2e2ea7ab80 remove extra whitespaces and Unpins 2020-02-27 09:37:05 +09:00
eeebc653fd change actix-http version to alpha 2020-02-27 09:37:05 +09:00
835a00599c rollback missed dependencies and CHANGES in crates except actix-http 2020-02-27 09:37:05 +09:00
d9c415e540 disable weird poll test until actix-web based on actix-http:2 2020-02-27 09:37:05 +09:00
09a391a3ca rollback changes to actix-web, awc and test-server for now 2020-02-27 09:37:05 +09:00
62aba424e2 Rollback actix-http-test dependency to show the issue 2020-02-27 09:37:05 +09:00
9d04b250f9 This is a squashed commit:
- Convert MessageBody to accept Pin in poll_next

- add CHANGES and increase versions aligned to semver

- update crates to accommodate MessageBody Pin change

- fix tests and dependencies
2020-02-27 09:37:05 +09:00
a4148de226 add test crashing with segfault according to #1321 2020-02-27 09:36:30 +09:00
48ef4d7a26 Add actix-http support for actix error messages (#1379)
* Moved actix-http for actix from actix crate

* remove resolver feature

* renamed actix feature to actor

* fixed doc attr for actors, add documentation
2020-02-27 09:34:49 +09:00
71c4bd1b30 Remove uses of Pin::new_unchecked in h1 Dispatcher (#1374)
This removes the last uses of unsafe `Pin` functions in actix-web.

This PR adds a `Pin<Box<_>>` wrapper to `DispatcherState::Upgrade`,
`State::ExpectCall`, and `State::ServiceCall`.

The previous uses of the futures `State::ExpectCall` and `State::ServiceCall`
were Undefined Behavior - a future was obtained from `self.expect.call`
or `self.service.call`, pinned on the stack, and then immediately
returned from `handle_request`. The only alternative to using `Box::pin`
would be to refactor `handle_request` to write the futures directly into
their final location, or avoid polling them before they are returned.

The previous use of `DispatcherState::Upgrade` doesn't seem to be
unsound. However, having data pinned inside an enum that we
`std::mem::replace` would require some careful `unsafe` code to ensure
that we never call `std::mem::replace` when the active variant contains
pinned data. By using `Box::pin`, we remove any possibility of future
refactoring accidentally introducing undefined behavior.

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-02-26 08:21:05 +09:00
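
A minimal sketch of the `Pin<Box<_>>` approach described above; the enum and names are illustrative, not the actual dispatcher types:

    use std::future::Future;
    use std::pin::Pin;

    // Boxing pins the future on the heap, so the enum itself can still be
    // moved or mem::replace'd without touching pinned data.
    enum State<F: Future> {
        None,
        ServiceCall(Pin<Box<F>>),
    }

    fn store_call<F: Future>(fut: F) -> State<F> {
        State::ServiceCall(Box::pin(fut))
    }
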
de1d6ad5cb Merge pull request #1344 from actix/replace-unsafe-content-length-helper
Replace unsafe content length helper
2020-02-25 17:02:22 +09:00
2a72e8d119 Merge branch 'master' into replace-unsafe-content-length-helper 2020-02-25 14:30:04 +09:00
2a8e5fdc73 Merge pull request #1370 from mattgathu/feat/helper-function-for-trace-method
Create helper function for HTTP Trace Method
2020-02-25 14:24:09 +09:00
b213c07799 Merge branch 'master' into feat/helper-function-for-trace-method 2020-02-25 12:36:20 +09:00
3d6b8686ad Merge pull request #1373 from JohnTitor/new-codegen
Release `actix-web-codegen` v0.2.1
2020-02-25 09:32:48 +09:00
a4f87a53da Update CHANGES.md 2020-02-25 08:42:39 +09:00
08f172a0aa Merge branch 'master' into new-codegen 2020-02-25 08:29:31 +09:00
7792eaa16e Merge pull request #1378 from JohnTitor/fix-doc
Fix doc comment
2020-02-25 08:29:14 +09:00
845ce3cf34 Fix doc comment 2020-02-25 07:46:03 +09:00
7daef22e24 Merge branch 'master' into new-codegen 2020-02-25 06:58:49 +09:00
1249262c35 Merge pull request #1372 from JohnTitor/time-0.2.7
Update `time` to 0.2.7
2020-02-25 06:58:33 +09:00
94da08f506 increase content-length fast path to responses up to 1MB 2020-02-24 20:58:41 +00:00
d143c44130 Update the ChangeLog 2020-02-23 09:33:28 +01:00
8ec8ccf4fb Create helper function for HTTP Trace Method
Create *route* with `TRACE` method guard.
2020-02-23 09:25:55 +01:00
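
A usage sketch of the helper described above, assuming it is exposed as `web::trace()` alongside the other method helpers:

    use actix_web::{web, App, HttpResponse};

    fn app() {
        // web::trace() creates a Route guarded on the TRACE method.
        let _app = App::new().route(
            "/probe",
            web::trace().to(|| async { HttpResponse::Ok().finish() }),
        );
    }
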
c8ccc69b93 actix-http: update time to 0.2.7 2020-02-23 07:09:00 +09:00
f9f9fb4c84 actix-http-test: update time to 0.2.7 2020-02-23 07:08:50 +09:00
1b77963aac actix-web: update time to 0.2.7 2020-02-23 07:08:22 +09:00
036ffd43f9 Prepare for new release 2020-02-23 06:40:02 +09:00
bdccccd536 Merge pull request #1368 from mattgathu/add-missing-docs-attr-to-codegen-structs
Add`#[allow(missing_docs)]` attribute to generated structs
2020-02-23 06:26:42 +09:00
060c392c67 Add missing_docs attribute to generated structs 2020-02-22 10:32:12 +01:00
245f96868a impl downcast_ref for MessageBody (#1287)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-02-21 13:31:51 +09:00
b3f1071aaf Merge pull request #1361 from Aaron1011/fix/connector-pool-support
Use #[pin_project] with `ConnectorPoolSupport`
2020-02-21 06:01:26 +09:00
e6811e8818 Use #[pin_project] with ConnectorPoolSupport
This removes a use of `Pin::get_unchecked_mut`
2020-02-19 21:42:53 -05:00
809930d36e Add dependencies to docs example (#1343)
* Add dependencies to docs example

* Change codeblock type to toml

* Clarify the need for actix-rt

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-02-20 05:13:10 +09:00
f266b44cb0 replace unsafe blocks in write_usize helper 2020-02-16 15:20:25 +00:00
31a3515e90 add safe vs unsafe benchmarks 2020-02-16 14:31:06 +00:00
82b2786d6b replace unsafe content length implementation 2020-02-16 14:31:05 +00:00
6ab7cfa2be Remove descriptions about undefined uds feature from docs (#1356) 2020-02-16 04:18:31 +09:00
9b3f7248a8 Merge pull request #1354 from actix/JohnTitor-patch-1
Disable coverage for PRs
2020-02-14 08:18:09 +09:00
a1835d6510 Disable coverage for PRs 2020-02-14 07:31:29 +09:00
4484b3f66e Merge pull request #1347 from actix/bye-travis
Use Actions fully
2020-02-12 05:54:32 +09:00
cde3ae5f61 Remove Travis config 2020-02-08 05:36:11 +09:00
7d40b66300 Add some Actions workflows 2020-02-08 04:28:34 +09:00
63730c1f73 Merge pull request #1345 from JohnTitor/fix-warnings
Fix warnings
2020-02-08 04:17:04 +09:00
53ff3ad099 Ignore more tests that cause timeouts 2020-02-08 02:20:01 +09:00
6406f56ca2 Fix/suppress warnings 2020-02-08 02:20:01 +09:00
728b944360 Extensions module improvement and tests. (#1297)
* replace get.is_some to contains_key

* Add tests

* remove unnecessary box cast

* fix missing uints

* asserts fix

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-02-07 16:08:25 +09:00
3851a377df Fix minor grammatical errors (#1341) 2020-02-07 03:00:22 +09:00
fe13789345 Use Pin<Box<S>> in BodyStream and SizedStream (#1328)
Fixes #1321

A better fix would be to change `MessageBody` to take a `Pin<&mut
Self>`, rather than a `&mut Self`. This will avoid requiring the
use of `Box` for all consumers by allowing the caller to determine how
to pin the `MessageBody` implementation (e.g. via stack pinning).

However, doing so is a breaking change that will affect every user of
`MessageBody`. By pinning the inner stream ourselves, we can fix the
undefined behavior without breaking the API.

I've included @sebzim4500's reproduction case as a new test case.
However, due to the nature of undefined behavior, this could pass (and
not segfault) even if underlying issue were to regress.

Unfortunately, until rust-lang/unsafe-code-guidelines#148 is resolved,
it's not even possible to write a Miri test that will pass when the bug
is fixed.

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-31 09:39:34 +09:00
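
A simplified sketch of the fix described above: the wrapper owns the inner stream behind `Pin<Box<_>>`, so it can be polled without any unsafe pinning (types are illustrative):

    use futures_core::Stream;
    use std::pin::Pin;

    struct BodyStreamLike<S> {
        // Heap-pinned inner stream: safe to poll even though the wrapper
        // itself is not structurally pinned.
        stream: Pin<Box<S>>,
    }

    impl<S: Stream> BodyStreamLike<S> {
        fn new(stream: S) -> Self {
            Self { stream: Box::pin(stream) }
        }

        fn inner_mut(&mut self) -> Pin<&mut S> {
            self.stream.as_mut()
        }
    }
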
3033f187d2 Enforce safety of downcast_ref at compile time. (#1326)
* Enforce safety of `downcast_ref` at compile time.

The safety of `downcast_ref` requires that `__private_get_type_id__` not
be overridden by callers, since the returned `TypeId` is used to check if
the cast is safe. However, all trait methods in Rust are public, so
users can override `__private_get_type_id__` despite it being
`#[doc(hidden)]`.

This commit makes `__private_get_type_id__` return a type with a private
constructor, ensuring that the only possible implementation is the
default implementation. A more detailed explanation is provided in the
comments added to the file.

Note that the standard library was affected by this type of issue with
the `Error::type_id` function: see https://blog.rust-lang.org/2019/05/14/Rust-1.34.2.html#whats-in-1.34.2-stable

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-30 23:43:35 +09:00
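
A sketch of the sealing trick described above, with illustrative names: the helper type has a private constructor, so the default method body is the only implementation callers can write, and the `TypeId` it reports can be trusted by `downcast_ref`:

    use std::any::TypeId;

    // Only the defining module can construct PrivateHelper.
    pub struct PrivateHelper(());

    pub trait BodyLike {
        #[doc(hidden)]
        fn private_type_id(&self) -> (TypeId, PrivateHelper)
        where
            Self: 'static,
        {
            (TypeId::of::<Self>(), PrivateHelper(()))
        }
    }
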
276a5a3ee4 Replace UnsafeCell with Cell in DateServiceInner (#1325)
* Replace `UnsafeCell` with `Cell` in `DateServiceInner`

This ensures that it's impossible to cause undefined behavior by
accidentally violating Rust's aliasing rules (e.g. passing a closure to
`set_date` which ends up invoking `reset` or `update` on the inner
`DateServiceInner`).

There might be a tiny amount of overhead from copying the `Option<(Date,
Instant)>` rather than taking a reference, but it shouldn't be
measurable.

Since the wrapped type is `Copy`, a `Cell` can be used, avoiding the
runtime overhead of a `RefCell`.

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-29 21:05:08 +09:00
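
A minimal sketch of the change described above: because the cached value is `Copy`, a `Cell` suffices and callers receive a copy rather than a reference (field and method names are illustrative):

    use std::cell::Cell;
    use std::time::Instant;

    struct DateServiceInner {
        // Option<Instant> is Copy, so Cell::get/set are enough; no reference
        // into the cell is ever handed out, so aliasing rules can't be broken.
        current: Cell<Option<Instant>>,
    }

    impl DateServiceInner {
        fn reset(&self) {
            self.current.set(None);
        }

        fn update(&self) {
            self.current.set(Some(Instant::now()));
        }

        fn get(&self) -> Option<Instant> {
            self.current.get()
        }
    }
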
664f9a8b2d Long lasting auto-prolonged session (#1292)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-29 10:26:39 +09:00
c73c2dc12c Don't use cache in Windows CI (#1327) 2020-01-29 09:00:04 +09:00
e634e64847 Upgrade time to 0.2.5 (#1254)
* Use `OffsetDateTime` instead of `PrimitiveDateTime`

* Parse time strings with `PrimitiveDateTime::parse` instead of `OffsetDateTime::parse`

* Remove unused `time` dependency from actix-multipart

* Fix a few errors with time related tests from the `time` upgrade

* Implement logic to convert a RFC 850 two-digit year into a full length year, and organize time parsing related functions

* Upgrade `time` to 0.2.2

* Correctly parse C's asctime time format using time 0.2's new format patterns

* Update CHANGES.md

* Use `time` without any of its deprecated functions

* Enforce a UTC time offset when converting an `OffsetDateTime` into a Header value

* Use the more readable version of `Duration::seconds(0)`, `Duration::zero()`

* Remove unneeded conversion of time::Duration to std::time::Duration

* Use `OffsetDateTime::as_seconds_f64` instead of manually calculating the amount of seconds from nanoseconds

* Replace a few additional instances of `Duration::seconds(0)` with `Duration::zero()`

* Truncate any nanoseconds from a supplied `Duration` within `Cookie::set_max_age` to ensure two Cookies with the same number of whole seconds equate to one another

* Fix the actix-http::cookie::do_not_panic_on_large_max_ages test

* Convert `Cookie::max_age` and `Cookie::expires` examples to `time` 0.2

Mainly minor changes. Type inference can be used alongside the new
`time::parse` method, such that the type doesn't need to be specified.
This will be useful if a refactoring takes place that changes the type.
There are also new macros, which are used where possible.

One change that is not immediately obvious, in `HttpDate`, there was an
unnecessary conditional. As the time crate allows for negative durations
(and can perform arithmetic with such), the if/else can be removed
entirely.

Time v0.2.3 also has some bug fixes, which is why I am not using a more
general v0.2 in Cargo.toml.

v0.2.3 has been yanked, as it was backwards incompatible. This version
reverts the breaking change, while still supporting rustc back to
1.34.0.

* Add missing `time::offset` macro import

* Fix type confusion when using `time::parse` followed by `using_offset`

* Update `time` to 0.2.5

* Update CHANGES.md

Co-authored-by: Jacob Pratt <the.z.cuber@gmail.com>
2020-01-28 20:44:22 +09:00
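
A small sketch of the max-age truncation mentioned above, assuming the `time` 0.2 `Duration` API (`whole_seconds`, `seconds`):

    use time::Duration;

    // Drop sub-second precision so two cookies with the same number of whole
    // seconds compare equal.
    fn truncate_max_age(max_age: Duration) -> Duration {
        Duration::seconds(max_age.whole_seconds())
    }
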
cdba30d45f Skip empty chunks for BodyStream and SizedStream (#1308)
* Skip empty chunks for BodyStream and SizedStream when streaming response (#1267)

* Fix tests to fail on previous implementation

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-28 18:28:09 +09:00
74dcc7366d Remove several uses of Pin::new_unchecked (#1294)
Most of the relevant structs already had a `#[pin_project]` attribute,
but it wasn't being used.

The remaining uses of `Pin::new_unchecked` all involve going from a
`&mut T` to a `Pin<&mut T>`, without directly observing a `Pin<&mut T>`
first. As such, they cannot be replaced by `pin_project`

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-28 12:35:51 +09:00
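
A minimal sketch of the `#[pin_project]` pattern referred to above (the wrapper type is illustrative):

    use pin_project::pin_project;
    use std::pin::Pin;

    #[pin_project]
    struct Wrapper<F> {
        #[pin]
        fut: F,        // pinned field, reached via the generated project()
        polled: usize, // plain field, projected as &mut usize
    }

    impl<F> Wrapper<F> {
        fn fut_mut(self: Pin<&mut Self>) -> Pin<&mut F> {
            // No Pin::new_unchecked needed: project() performs the projection.
            self.project().fut
        }
    }
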
d137a8635b Replace Pin::new_unchecked with #[pin_project] in tuple_from_req! (#1293)
Using some module trickery, we can generate a tuple struct for each
invocation of the macro. This allows us to use `pin_project` to project
through to the tuple fields, removing the need to use
`Pin::new_unchecked`

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-28 10:45:25 +09:00
a2d4ff157e Update call_service documentation (#1302)
Co-authored-by: Christian Battaglia <christian.d.battaglia@gmail.com>
2020-01-28 08:09:46 +09:00
71d11644a7 Add ability to name a handler function as 'config' (#1290)
* eliminate handler naming restrictions #1277

* Update actix-web-codegen/CHANGES.md

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-26 07:22:40 +09:00
8888520d83 Add benchmark for full stack request lifecycle (#1298)
* add benchmark for full stack request lifecycle

* add direct service benchmarks

* fix newline

* add cloneable service benchmarks

* remove cloneable bench experiments + cargo fmt

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-25 08:05:25 +09:00
cf3577550c Tweak caches (#1319)
* Try to use `cargo-cache`

* Tweak issue template
2020-01-25 02:27:13 +09:00
58844874a0 Fixing #1295 convert UnsafeCell to RefCell in CloneableService (#1303)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-24 14:51:38 +09:00
78f24dda03 Initial Issue template (#1311)
* Initial Issue template

* First round of changes for the bug report

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-24 07:32:34 +09:00
e17b3accb9 Remove codecoverage for tests and examples (#1299)
* Ignore Tests & Examples for CodeCoverage

Ignore Tests & Examples for CodeCoverage
2020-01-24 05:10:02 +09:00
c6fa007e72 Fix vcpkg cache (#1312) 2020-01-23 11:27:34 +09:00
a3287948d1 allow explicit SameSite=None cookies (#1282)
fixes #1035
2020-01-23 10:08:23 +09:00
2e9ab0625e Tweak actions (#1305)
* Add benchmark action

* Fix Windows build
2020-01-23 06:23:53 +09:00
3a5b62b550 Add dependencies instruction (#1281) 2020-01-16 23:17:17 +09:00
412e54ce10 Fixed documentation typo for actix-files (#1278) 2020-01-15 11:09:58 -08:00
bca41f8d40 Changes to Cors builder (#1266)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-11 04:53:17 +09:00
7c974ee668 Update doc comment for HttpRequest::app_data (#1265)
* update doc comment for `HttpRequest::app_data`

* add `no_run` to doc comment

* add `ignore` to doc comment

* Update src/request.rs

Co-Authored-By: Yuki Okushi <huyuumi.dev@gmail.com>

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-11 03:55:20 +09:00
abb462ef85 Replace sha1 dependency with sha-1 (#1258)
* Replace sha1 dependency with sha-1

This other crate is being maintained, and it offers better performance
when using the `asm` feature (especially [on
AArch64](https://github.com/RustCrypto/hashes/pull/97)).

* Update CHANGES.md with the sha-1 migration

* Add a test for hash_key()
2020-01-11 02:34:31 +09:00
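
A sketch of the kind of hashing helper the test above exercises, using the maintained `sha-1` crate's RustCrypto `Digest` API (the helper name mirrors the commit message; its exact role in the crate is assumed):

    use sha1::{Digest, Sha1};

    // Hash arbitrary key bytes into their 20-byte SHA-1 digest.
    fn hash_key(key: &[u8]) -> Vec<u8> {
        let mut hasher = Sha1::new();
        hasher.update(key);
        hasher.finalize().to_vec()
    }
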
e66312b664 add extra constraints 2020-01-10 11:36:59 +06:00
39f4b2b39e Merge branch 'master' of github.com:actix/actix-web 2020-01-10 11:28:58 +06:00
f6ff056b8a Fix panic with already borrowed: BorrowMutError #1263 2020-01-10 11:26:54 +06:00
51ab4fb73d Tweak actions to use cache and not to be stuck on the way (#1264) 2020-01-10 03:30:45 +09:00
f5fd6bc49f Fix actix-http examples (#1259)
Fix actix-http examples
2020-01-07 00:15:04 +09:00
2803fcbe22 Small grammatical update to lib.rs (#1248) 2020-01-03 08:45:17 +06:00
67793c5d92 add ssl feature migration 2019-12-30 21:22:04 +06:00
bcb5086c91 Added 2.0.0 rustls feature name change (#1244) 2019-12-30 21:16:04 +06:00
7bd2270290 Fix link to example in readme.md (#1236)
* Fix link to example in readme.md

* Add links to openssl and rustls examples

* Rustls should be uppercase
2019-12-26 19:42:07 +09:00
a4ad5e6b69 update timeouts for test server 2019-12-25 20:52:20 +04:00
6db909a3e7 update migration 2019-12-25 20:27:30 +04:00
642ae161c0 prep actix-web release 2019-12-25 20:21:00 +04:00
7b3c99b933 prep actix-framed release 2019-12-25 20:17:22 +04:00
f86ce0390e allow to specify multi pattern for resources 2019-12-25 20:14:44 +04:00
7882f545e5 Allow to gracefully stop test server via TestServer::stop() 2019-12-25 12:10:48 +04:00
1c75e6876b update migration 2019-12-22 17:16:07 +04:00
6a0cd2dced Rename HttpServer::start() to HttpServer::run() 2019-12-22 17:12:22 +04:00
c7f3915779 update actix-service dep 2019-12-22 16:39:25 +04:00
f45db1f909 Enable GitHub Actions and fix file URL behavior (#1232)
* Use GitHub Actions

* Fix unused imports on Windows

* Fix test for Windows

* Stop to run CI for i686-pc-windows-msvc for now

* Use `/` instead of `\` on Windows

* Add entry to changelog

* Prepare actix-files release
2019-12-22 16:43:41 +09:00
3751a4018e fixed test::init_service api docs (missing await) (#1230) 2019-12-21 08:47:18 +06:00
0cb1b0642f add test server data test 2019-12-20 23:18:59 +06:00
48476362a3 update changes 2019-12-20 17:59:34 +06:00
2b4256baab add links to configs 2019-12-20 17:49:05 +06:00
e5a50f423d Make web::Data deref to Arc<T> #1214 2019-12-20 17:45:35 +06:00
8b8a9a995d bump ver 2019-12-20 17:36:48 +06:00
74fa4060c2 fix awc tests 2019-12-20 17:27:32 +06:00
c877840c07 rename App::register_data to App::app_data and HttpRequest::app_data returns Option<&T> instead of Option<&Data<T>> 2019-12-20 17:13:09 +06:00
20248daeda Allow to set peer_addr for TestRequest #1074 2019-12-20 16:11:51 +06:00
a08d8dab70 AppConfig::secure() is always false. #1202 2019-12-20 16:04:51 +06:00
fbbb4a86e9 feat: add access to the session also from immutable references (#1225) 2019-12-20 13:59:07 +06:00
1d12ba9d5f Replace brotli with brotli2 #1224 2019-12-20 13:50:07 +06:00
8c54054844 Use .advance() instead of .split_to() 2019-12-19 09:56:14 +06:00
1732ae8c79 fix Bodyencoding trait usage 2019-12-18 09:30:14 +06:00
3b860ebdc7 Fix poll_ready call for WebSockets upgrade (#1219)
* Fix poll_ready call for WebSockets upgrade

* Poll upgrade service from H1ServiceHandler too
2019-12-17 13:34:25 +06:00
29ac6463e1 Merge branch 'master' of github.com:actix/actix-web 2019-12-16 17:22:49 +06:00
01613f334b Move BodyEncoding to dev module #1220 2019-12-16 17:22:26 +06:00
30dcaf9da0 fix deprecated Error::description (#1218) 2019-12-16 07:43:19 +06:00
b0aa9395da prep actix-web alpha.6 release 2019-12-15 22:51:14 +06:00
a153374b61 migrate actix-web-actors 2019-12-15 22:45:38 +06:00
a791aab418 prep awc release 2019-12-15 13:36:05 +06:00
cb705317b8 compile with default-features off 2019-12-15 13:28:54 +06:00
e8e0f98f96 fix docs.rs features list 2019-12-13 12:41:48 +06:00
c878f66d05 fix docs.rs features list 2019-12-13 12:40:22 +06:00
fac6dec3c9 update deps 2019-12-13 12:36:15 +06:00
232f71b3b5 update changes 2019-12-13 12:18:30 +06:00
8881c13e60 update changes 2019-12-13 12:16:43 +06:00
d006a7b31f update changes 2019-12-13 12:10:45 +06:00
3d64d565d9 fix warnings 2019-12-13 11:46:02 +06:00
c1deaaeb2f cleanup imports 2019-12-13 11:24:57 +06:00
b81417c2fa fix warnings 2019-12-13 10:59:02 +06:00
4937c9f9c2 refactor http-test server 2019-12-12 23:08:38 +06:00
db1d6b7963 refactor test server impl 2019-12-12 22:28:47 +06:00
fa07415721 Replace flate2-xxx features with compress 2019-12-12 15:08:08 +06:00
b4b3350b3e Add websockets continuation frame support 2019-12-12 14:06:54 +06:00
4a1695f719 fixes missing import in example (#1210) 2019-12-12 07:06:22 +06:00
1b8d747937 Fix extra line feed (#1209) 2019-12-12 07:05:39 +06:00
a43a005f59 Log path if it is not a directory (#1208) 2019-12-12 07:04:53 +06:00
a612b74aeb actix-multipart: Fix multipart boundary reading (#1205)
* actix-multipart: Fix multipart boundary reading

If we're not ready to read the first line after the multipart field
(which should be a "\r\n" line) then return Pending instead of Ready(None)
so that we will get called again to read that line.

Without this I was getting MultipartError::Boundary from read_boundary()
because it got the "\r\n" line instead of the boundary.

Also tweaks the test_stream test to test partial reads.

This is a forward port of #1189 from 1.0

* actix-multipart: Update changes for boundary fix
2019-12-12 07:03:44 +06:00
131c897099 upgrade to actix-net release 2019-12-11 19:20:20 +06:00
ef3a33b9d6 use std mutext instead of parking_lot 2019-12-10 09:00:51 +06:00
5132257b0d Fix buffer remaining capacity calculation 2019-12-09 21:55:22 +06:00
0c1f5f9edc Check Upgrade service readiness before calling it 2019-12-09 17:40:15 +06:00
e4382e4fc1 Fix broken docs (#1204)
Fixed unescaped brackets in lib.rs, and reflowed links to ConnectionInfo in app, config, and server.rs
2019-12-09 10:02:43 +06:00
a3ce371312 ws ping and pong uses bytes #1049 2019-12-09 07:01:22 +06:00
42258ee289 deps 2019-12-08 20:22:39 +06:00
b92eafb839 prepare actix-http release 2019-12-08 20:15:51 +06:00
3b2e78db47 add link to chat 2019-12-08 19:27:06 +06:00
63da1a5560 Merge branch 'master' of github.com:actix/actix-web 2019-12-08 19:26:12 +06:00
1f3ffe38e8 update actix-service dep 2019-12-08 19:25:24 +06:00
c23b6b3879 Merge pull request #1192 from krircc/master
Add rich project metadata
2019-12-08 16:03:39 +08:00
909c7c8b5b Merge branch 'master' into master 2019-12-08 16:26:35 +09:00
4a8a9ef405 update tests and clippy warnings 2019-12-08 12:31:16 +06:00
6c9f9fff73 clippy warnings 2019-12-08 00:46:51 +06:00
8df33f7a81 remove HttpServer::run() as it is not useful with async/await 2019-12-08 00:06:04 +06:00
7ec5ca88a1 update changes 2019-12-07 22:01:55 +06:00
e5f3d88a4e Switch brotli compressor to rust. (#1197)
* Switch to a rustified version of brotli.

* Some memory optimizations.

* Make brotli not optional anymore.
2019-12-07 21:55:41 +06:00
0ba125444a Add impl ResponseBuilder for Error 2019-12-07 21:41:34 +06:00
6c226e47bd prepare actix-web-actors release 2019-12-07 20:10:36 +06:00
8c3f58db9d Allow comma-separated websocket subprotocols without spaces (#1172)
* Allow comma-separated websocket subprotocols without spaces

* [CHANGES] Added an entry to CHANGES.md
2019-12-07 20:08:06 +06:00
4921243add Fix rustls build. (#1195) 2019-12-07 16:14:09 +06:00
91b3fcf85c Fix dependency features. (#1196) 2019-12-07 16:13:26 +06:00
1729a52f8b prepare alpha.3 release 2019-12-07 13:00:03 +06:00
ed2f3fe80d use actix-net alpha.3 release 2019-12-07 12:28:26 +06:00
f2ba389496 Merge branch 'master' into master 2019-12-06 16:57:42 +09:00
439f02b6b1 Update README.md 2019-12-06 14:59:11 +08:00
e32da08a26 Update README.md 2019-12-06 14:34:14 +08:00
82110e0927 Update README.md 2019-12-06 14:29:10 +08:00
7b3354a9ad Update README.md 2019-12-06 14:26:23 +08:00
5243e8baca Update README.md 2019-12-06 14:23:28 +08:00
98903028c7 Update README.md 2019-12-06 14:22:29 +08:00
7dd676439c update changes for actix-session 2019-12-06 11:24:25 +06:00
fbead137f0 feat: add access to UserSession from RequestHead (#1164)
* feat: add access to UserSession from RequestHead

* add test case for session from RequestHead and changes entry for the new feature
2019-12-06 11:21:43 +06:00
205a964d8f upgrade to tokio 0.2 2019-12-05 23:35:43 +06:00
b45c6cd66b replace hashbrown with std hashmap 2019-12-04 18:33:43 +06:00
0015a204aa update version 2019-12-03 19:03:53 +06:00
c7ed6d3428 update version 2019-12-03 16:35:31 +06:00
cf30eafb49 update md 2019-12-03 00:49:12 +06:00
14075ebf7f use released versions of actix-net 2019-12-02 23:33:39 +06:00
068f047dd5 update service factory config 2019-12-02 21:37:13 +06:00
f4c01384ec update to latest actix-net 2019-12-02 17:33:11 +06:00
b7d44d6c4c Merge pull request #1 from actix/master
git pull
2019-12-01 16:56:42 +08:00
33574403b5 Remove rustls from package.metadata.docs.rs (#1182) 2019-11-28 06:25:21 +06:00
dcc6efa3e6 Merge branch 'master' of github.com:actix/actix-web 2019-11-27 21:08:13 +06:00
56b9f11c98 disable rustls 2019-11-27 21:07:49 +06:00
f43a706364 Set name for each generated resource 2019-11-26 19:25:28 +06:00
f2b3dc5625 update examples 2019-11-26 17:16:33 +06:00
f73f97353b refactor ResponseError trait 2019-11-26 16:07:39 +06:00
4dc31aac93 use actix_rt::test for test setup 2019-11-26 11:25:50 +06:00
c1c44a7dd6 upgrade derive_more 2019-11-25 17:59:14 +06:00
c5907747ad Remove implementation of Responder for (). Fixes #1108.
Rationale:

- In Rust, one can omit a semicolon after a function's final expression to make
  its value the function's return value. It's common for people to include a
  semicolon after the last expression by mistake - common enough that the Rust
  compiler suggests removing the semicolon when there's a type mismatch between
  the function's signature and body. By implementing Responder for (), Actix makes
  this common mistake a silent error in handler functions.

- Functions returning an empty body should return HTTP status 204 ("No Content"),
  so the current Responder impl for (), which returns status 200 ("OK"), is not
  really what one wants anyway.

- It's not much of a burden to ask handlers to explicitly return
  `HttpResponse::Ok()` if that is what they want; all the examples in the
  documentation do this already.
2019-11-23 21:10:02 +06:00
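
A sketch of the mistake described above and the explicit alternative:

    use actix_web::{HttpResponse, Responder};

    // The trailing semicolon makes the function return (); with
    // `impl Responder for ()` removed, registering this as a handler becomes
    // a compile error instead of a silent empty 200 response.
    async fn broken() {
        HttpResponse::Ok().body("hello");
    }

    // The explicit form the documentation examples already use.
    async fn ok() -> impl Responder {
        HttpResponse::Ok().body("hello")
    }
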
525c22de15 fix typos from updating to futures 0.3 2019-11-22 13:25:55 +06:00
57981ca04a update tests to async handlers 2019-11-22 11:49:35 +06:00
e668acc596 update travis config 2019-11-22 10:13:32 +06:00
512dd2be63 disable rustls support 2019-11-22 07:01:05 +06:00
8683ba8bb0 rename .to_async() to .to() 2019-11-21 21:36:35 +06:00
0b9e3d381b add test with custom connector 2019-11-21 17:36:18 +06:00
1f0577f8d5 cleanup api doc examples 2019-11-21 16:02:17 +06:00
53c5151692 use response instead of result for async handlers 2019-11-21 16:02:17 +06:00
55698f2524 migrate rest of middlewares 2019-11-21 16:02:17 +06:00
471f82f0e0 migrate actix-multipart 2019-11-21 16:02:17 +06:00
60ada97b3d migrate actix-session 2019-11-21 16:02:17 +06:00
0de101bc4d update actix-web-codegen tests 2019-11-21 16:02:17 +06:00
95e2a0ef2e migrate actix-framed 2019-11-21 16:02:17 +06:00
69cadcdedb migrate actix-files 2019-11-21 16:02:17 +06:00
6ac4ac66b9 migrate actix-cors 2019-11-21 16:02:17 +06:00
3646725cf6 migrate actix-identity 2019-11-21 16:02:17 +06:00
ff62facc0d disable unmigrated crates 2019-11-21 16:02:17 +06:00
b510527a9f update awc tests 2019-11-21 16:02:17 +06:00
3127dd4db6 migrate actix-web to std::future 2019-11-21 16:02:17 +06:00
d081e57316 fix h2 client send body 2019-11-21 16:02:17 +06:00
1ffa7d18d3 drop unpin constraint 2019-11-21 16:02:17 +06:00
687884fb94 update test-server tests 2019-11-21 16:02:17 +06:00
5ab29b2e62 migrate awc and test-server to std::future 2019-11-21 16:02:17 +06:00
a6a2d2f444 update ssl impls 2019-11-21 16:02:17 +06:00
9e95efcc16 migrate client to std::future 2019-11-21 16:02:17 +06:00
8cba1170e6 make actix-http compile with std::future 2019-11-21 16:02:17 +06:00
5cb2d500d1 update actix-web-actors 2019-11-14 08:58:24 +06:00
0212c618c6 prepare actix-web release 2019-11-14 08:55:37 +06:00
88110ed268 Add security note to ConnectionInfo::remote() (#1158) 2019-11-14 08:32:47 +06:00
fba02fdd8c prep awc release 2019-11-06 11:33:25 -08:00
b2934ad8d2 prep actix-file release 2019-11-06 11:25:26 -08:00
f7f410d033 fix test order dep 2019-11-06 11:20:47 -08:00
885ff7396e prepare actix-http release 2019-11-06 10:35:13 -08:00
61b38e8d0d Increase timeouts in test-server (#1153) 2019-11-06 06:09:22 -08:00
edcde67076 Fix escaping/encoding problems in Content-Disposition header (#1151)
* Fix filename encoding in Content-Disposition of actix_files::NamedFile

* Add more comments on how to use Content-Disposition header properly & Fix some trivial problems

* Improve Content-Disposition filename(*) parameters of actix_files::NamedFile

* Tweak Content-Disposition parse to accept empty param value in quoted-string

* Fix typos in comments in .../content_disposition.rs (pointed out by @JohnTitor)

* Update CHANGES.md

* Update CHANGES.md again
2019-11-06 06:08:37 -08:00
f0612f7570 awc: Add support for setting query from Serialize type for client request (#1130)
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2019-10-26 08:27:14 +03:00
ace98e3a1e support Host guards when Host header is unset (#1129) 2019-10-15 05:05:54 +06:00
1ca9d87f0a prep actix-web-codegen release 2019-10-14 21:35:53 +06:00
967f965405 Update syn & quote to 1.0 (#1133)
* chore(actix-web-codegen): Upgrade syn and quote to 1.0

* feat(actix-web-codegen): Generate better error message

* doc(actix-web-codegen): Update CHANGES.md

* fix: Build with stable rust
2019-10-14 21:34:17 +06:00
062e51e8ce prep actix-file release 2019-10-14 21:26:26 +06:00
a48e616def feat(files): add possibility to redirect to slash-ended path (#1134)
When accessing a folder without a final slash, the index file will be loaded fine, but if it has
references (like a CSS file or an image in an HTML file) these resources won't be loaded correctly if
they are using relative paths. In order to solve this, this PR adds the possibility to detect
folders without a final slash and make a 302 redirect to mitigate this issue. The behavior is off by
default. We're adding a new method called `redirect_to_slash_directory` which can be used to enable
this behavior.
2019-10-14 21:23:15 +06:00
effa96f5e4 Removed httpcode 'MovedPermanenty'. (#1128) 2019-10-12 06:45:12 +06:00
cc0b4be5b7 Fix typo in response.rs body() comment (#1126)
Fixes https://github.com/actix/actix-web/issues/1125
2019-10-09 19:11:55 +06:00
a464ffc23d prepare actix-files release 2019-10-08 10:13:16 +06:00
4de2e8a898 [actix-files] Allow user defined guards for NamedFile (actix#1113) (#1115)
* [actix-files] remove request method checks from NamedFile

* [actix-files] added custom guard checks to FilesService

* [actix-files] modify method check tests (NamedFile -> Files)

* [actix-files] add test for custom guards in Files

* [actix-files] update changelog
2019-10-08 10:09:40 +06:00
0f09415469 Convert documentation examples to Rust 2018 edition (#1120)
* Convert types::query examples to rust-2018 edition

* Convert types::json examples to rust-2018 edition

* Convert types::path examples to rust-2018 edition

* Convert types::form examples to rust-2018 edition

* Convert rest of the examples to rust-2018 edition.
2019-10-07 11:29:11 +06:00
f089cf185b Let ResponseError render w/ 'text/plain; charset=utf-8' header (#1118) (#1119)
* Let ResponseError render w/ 'text/plain; charset=utf-8' header (#1118)

Trait ResponseError originally rendered error messages with the header
`text/plain`, which leaves browsers with a non-English locale (e.g.
Firefox 70.0) unable to render UTF-8 responses with non-English
characters, e.g. emoji, correctly.

This fix solves the problem by specifying the charset of `text/plain`
as utf-8, which is the default charset in Rust.

Until actix-web considers supporting other charsets, this hotfix is
enough.

Test case:

fn test() -> Result<String, actix_web::Error> {
    Err(actix_web::error::ErrorForbidden("😋test"))
}

* Update actix-http/CHANGES.md for #1118
2019-10-07 10:56:24 +06:00
15d3c1ae81 Update docs of guard.rs (#1116)
* Update guard.rs
2019-10-07 12:05:17 +09:00
fba31d4e0a Expose ContentDisposition in actix-multipart to fix broken doc link (#1114)
* Expose ContentDisposition in actix-multipart to fix broken doc link

* Revert "Expose ContentDisposition in actix-multipart to fix broken doc link"

This reverts commit e90d71d16c.

* Unhide actix-http::header::common docs

These types are used in other exported documented interfaces and create
broken links if not documented.
See `actix_multipart::Field.content_disposition`
2019-10-02 09:48:25 +06:00
f81ae37677 Add From<Payload> for crate::dev::Payload (#1110)
* Add From<Payload> for crate::dev::Payload

* Make dev::Payload field of Payload public and add into_inner method

* Add changelog entry
2019-10-01 14:05:38 +06:00
5169d306ae update ConnectionInfo.remote() doc string 2019-09-27 07:03:12 +06:00
4f3e97fff8 prepare actix-web release 2019-09-25 15:39:09 +06:00
3ff01a9fc4 Add changelog entry for #1101 (#1102) 2019-09-25 15:35:28 +06:00
3d4e45a0e5 prepare release 2019-09-25 15:30:20 +06:00
c659c33919 Feature uds: Add listen_uds to ServerBuilder (#1085)
Allows using an existing Unix Listener instead of binding to a path.
Useful when running as a daemon under systemd.

Change-Id: I54a0e78c321d8b7a9ded381083217af590e9a7fa
2019-09-25 15:16:51 +06:00
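
A sketch of the use case described above, assuming the matching `listen_uds` method is exposed on `HttpServer` (Unix only):

    #[cfg(unix)]
    fn serve() -> std::io::Result<()> {
        use actix_web::{App, HttpServer};
        use std::os::unix::net::UnixListener;

        // Hand an already-bound listener (e.g. one inherited from systemd)
        // to the server instead of a filesystem path.
        let listener = UnixListener::bind("/tmp/example.sock")?;
        let _server = HttpServer::new(|| App::new()).listen_uds(listener)?;
        Ok(())
    }
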
959f7754b2 Merge pull request #1101 from actix/add-awc-get-head-methods
Add remaining getter methods from private head field
2019-09-25 10:23:23 +02:00
23f04c4f38 Add remaining getter methods from private head field 2019-09-25 08:50:45 +02:00
d9af8f66ba Use actix-testing for testing utils 2019-09-25 10:28:41 +06:00
aa39b8ca6f Add support for serde_json::Value to be passed as argument to ResponseBuilder.body() (#1096)
* Add support for serde_json::Value to be passed as argument to ResponseBuilder.body()

* Update actix-http/CHANGES.md
2019-09-25 09:33:52 +06:00
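A short sketch of what this addition enables: a `serde_json::Value` can be handed directly to `ResponseBuilder::body()` (assuming `serde_json` is available; the handler itself is illustrative):

```rust
use actix_web::HttpResponse;
use serde_json::json;

fn status() -> HttpResponse {
    // The Value is converted into the response body without an explicit
    // serialization step in the handler.
    HttpResponse::Ok()
        .content_type("application/json")
        .body(json!({ "service": "actix-web", "healthy": true }))
}
```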
58c7065f08 Implement register_data method on Resource and Scope. (#1094)
* Implement `register_data` method on `Resource` and `Scope`.

* Split Scope::register_data tests out from Scope::data tests.

* CHANGES.md: Mention {Scope,Resource}::register_data.
2019-09-18 06:36:39 +06:00
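A sketch of the 1.0.8-era usage: `register_data` attaches a pre-built `web::Data<T>` to just one scope (the state type and routes below are illustrative):

```rust
use actix_web::{web, App, HttpServer};

struct ScopeState {
    label: &'static str,
}

fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().service(
            web::scope("/api")
                // Data registered here is only visible to handlers inside /api.
                .register_data(web::Data::new(ScopeState { label: "api" }))
                .route(
                    "/label",
                    web::get().to(|state: web::Data<ScopeState>| state.label.to_string()),
                ),
        )
    })
    .bind("127.0.0.1:8080")?
    .run()
}
```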
b3783b403e Merge branch 'master' of github.com:actix/actix-web 2019-09-17 21:46:45 +06:00
e4503046de Do not override current System 2019-09-17 21:45:06 +06:00
32a1c36597 Make UrlencodedError::Overflow more informative (#1089) 2019-09-17 06:58:04 +06:00
7c9f9afc46 Add ability to use Infallible as HttpResponse error type (#1093)
* Add `std::convert::Infallible` implementation for `ResponseError`

* Add from `std::convert::Infallible` to `Error`

* Remove `ResponseError` implementation for `Infallible`

* Remove useless docs

* Better comment

* Update changelog

* Update actix_http::changelog
2019-09-17 06:57:38 +06:00
c1f99e0775 Remove mem::uninitialized() (#1090) 2019-09-16 07:52:23 +09:00
a32573bb58 Allow to re-construct ServiceRequest from HttpRequest and Payload #1088 2019-09-13 11:56:24 +06:00
e35d930ef9 prepare releases 2019-09-12 21:58:08 +06:00
60b7aebd0a fmt & clippy 2019-09-12 21:52:46 +06:00
45d2fd4299 export frozen request related types; refactor code layout 2019-09-12 10:40:56 +06:00
71f8577713 prepare awc release 2019-09-11 20:13:28 +06:00
043f763c51 prepare actix-http release 2019-09-11 20:07:39 +06:00
8873e9b39e Added FrozenClientRequest for easier retrying HTTP calls (#1064)
* Initial commit

* Added extra_headers

* Added freeze() method to ClientRequest which produces a 'read-only' copy of a request suitable for retrying the send operation

* Additional methods for FrozenClientRequest

* Fix

* Increased crates versions

* Fixed a unit test. Added one more unit test.

* Added RequestHeaderWrapper

* Small fixes

* Renamed RequestHeadWrapper->RequestHeadType

* Updated CHANGES.md files

* Small fix

* Small changes

* Removed *_extra methods from Connection trait

* Added FrozenSendBuilder

* Added FrozenSendBuilder

* Minor fix

* Replaced impl Future with concrete Future implementation

* Small renaming

* Renamed Send->SendBody
2019-09-10 10:29:32 +06:00
5e8f1c338c fix h2 not using error response (#1080)
* fix h2 not using error response

* add fix change log

* fix h2 service error tests
2019-09-09 16:24:57 +06:00
1d96ae9bc3 actix-multipart: Correctly parse multipart body which does not end in CRLF (#1042)
* Correctly parse multipart body which does not end in CRLF

* Add in an eof guard for extra safety
2019-09-09 13:58:00 +06:00
8d61fe0925 Ensure that awc::ws::WebsocketsRequest sets the Host header (#1070)
* Ensure that awc::ws::WebsocketsRequest sets the Host header before connecting.

* Make sure to check if headers already have a HOST value before setting

* Update CHANGES.md to reflect WebSocket client update.
2019-09-09 12:27:13 +06:00
8a9fcddb3c Condition middleware (#1075)
* add condition middleware

* write tests

* update changes

* Update src/middleware/condition.rs

Co-Authored-By: Yuki Okushi <huyuumi.dev@gmail.com>

* Update src/middleware/condition.rs

Co-Authored-By: Yuki Okushi <huyuumi.dev@gmail.com>

* Update src/middleware/condition.rs

Co-Authored-By: Yuki Okushi <huyuumi.dev@gmail.com>

* Update src/middleware/condition.rs

Co-Authored-By: Yuki Okushi <huyuumi.dev@gmail.com>
2019-09-09 12:26:38 +06:00
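A small sketch of the new middleware, using the 1.0-era blocking server API (the environment flag is illustrative):

```rust
use actix_web::middleware::{Condition, Logger};
use actix_web::{web, App, HttpServer};

fn main() -> std::io::Result<()> {
    let enable_logging = std::env::var("LOG_REQUESTS").is_ok();

    HttpServer::new(move || {
        App::new()
            // Logger is only attached when the flag evaluated to true.
            .wrap(Condition::new(enable_logging, Logger::default()))
            .route("/", web::get().to(|| "hello"))
    })
    .bind("127.0.0.1:8080")?
    .run()
}
```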
c9400456f6 update actix-connect ver 2019-09-02 15:20:28 -07:00
63ddd30ee4 on_connect result isnt added to request extensions for http2 requests #1009 2019-09-01 13:15:02 +06:00
bae29897d6 prep actix-web release 2019-08-29 09:36:16 +06:00
616981ecf9 clear extensions before reclaiming HttpRequests in their pool (#1063)
Issue #1062
2019-08-29 09:35:05 +06:00
98bf8ab098 enable rust-tls feature for actix_web::client #1045 2019-08-28 21:40:24 +06:00
c193137905 actix_web::test::TestRequest::set_form (#1058) 2019-08-28 21:32:17 +06:00
a07cdd6533 Data::into_inner 2019-08-27 17:25:25 +01:00
61e492e7e3 Prepare actix-multipart 0.1.3 release 2019-08-18 10:39:22 +09:00
23d768a77b Add explicit dyns (#1041)
* Add explicit `dyn`s

* Remove unnecessary lines
2019-08-17 02:45:44 +09:00
87b7162473 chore(readme): fix copy paste error (#1040)
Fix actix-cors README
2019-08-16 09:21:30 +09:00
979c4d44f4 update awc dep 2019-08-13 12:41:26 -07:00
5d248cad89 prep release 2019-08-13 12:28:05 -07:00
b1cb72d088 update url crate 2019-08-13 11:03:24 -07:00
55179d6ab2 update dependencies 2019-08-13 10:48:11 -07:00
192dfff680 prepare actix-http 0.2.9 release 2019-08-13 15:20:29 +02:00
915010e733 Fixes a bug in OpenWaitingConnection where the h2 flow would panic a future (#1031) 2019-08-13 14:55:04 +02:00
dbe4c9ffb5 Replace deprecated methods in actix_files (#1027)
* Bump up mime_guess to 2.0.1

* Replace deprecated methods

* Update CHANGE.md
2019-08-12 05:43:29 +09:00
0ee69671ba Update nightly to 2019-08-10 (#1028) 2019-08-12 04:00:13 +09:00
80e1d16ab8 Merge pull request #1023 from lukaslueg/byteorder_removed
Remove byteorder-dependency
2019-08-07 12:28:23 -03:00
b70de5b991 Update CHANGES.md 2019-08-07 16:43:03 +02:00
0b9e692298 Remove byteorder-dependency 2019-08-06 18:32:36 +02:00
cf1a60cb3a prepare awc release 2019-08-01 15:41:14 -07:00
0d15861e23 prepare actix-http release 2019-08-01 15:26:30 -07:00
cb19ebfe0c add rustls support for actix-http and awc (#998)
* add rustls support for actix-http and awc

* fix features conflict

* remove unnecessary duplication

* test server with rust-tls

* fix

* test rustls

* awc rustls test

* format

* tests

* fix dependencies

* fixes and add changes

* remove test-server and Cargo.toml dev-dependencies changes

* cargo fmt
2019-07-31 13:02:56 -07:00
0d9ea41047 update min rust version 2019-07-31 06:49:46 -07:00
e9b4aa205f Merge branch 'master' of github.com:actix/actix-web 2019-07-30 08:00:57 -07:00
7674f1173c fix awc client panic #1016 2019-07-30 08:00:46 -07:00
511026cab0 Allow HeaderMap to be cloned (#1014)
* Allow HeaderMap to be cloned

* Add entry to changelog
2019-07-29 08:11:23 +04:00
81ab37f235 Fix two dyn warnings (#1015) 2019-07-29 08:10:33 +04:00
6f2049ba9b Fix typo 2019-07-25 12:54:59 +01:00
52372fcbea actix-files: "Specified path is not a directory" error now includes the path (#1004) 2019-07-23 06:41:58 +06:00
f3751d83f8 Modify response body only if encoder is not None #997 2019-07-22 11:35:00 +06:00
b0b462581b update CHANGES.md for Form impl Responder 2019-07-20 14:46:46 +01:00
8f48ed2597 impl Responder for Form 2019-07-20 14:46:46 +01:00
c96068e78d bump version 2019-07-20 11:46:21 +06:00
7bca1f7d8d Allow to disable Content-Disposition header #686 2019-07-20 11:43:49 +06:00
3618a84164 update changes 2019-07-20 11:27:21 +06:00
03ca408e94 add support for specifying protocols on websocket handshake (#835)
* add support for specifying protocols on websocket handshake

* separated the handshake function with and without protocols
changed protocols type from Vec<&str> to [&str]
2019-07-20 11:22:06 +06:00
e53e9c8ba3 Add the start_with_addr() function, to obtain an addr to the target websocket actor (#988) 2019-07-20 11:17:58 +06:00
941241c5f0 Remove unneeded actix-utils dependency 2019-07-20 10:50:36 +06:00
f8320fedd8 add note about Query decoding (#992) 2019-07-19 17:37:49 +06:00
c808364c07 make Query payload public (#991) 2019-07-19 15:47:44 +06:00
cccd829656 update changes 2019-07-19 11:07:52 +06:00
3650f6d7b8 Re-implement Host predicate (#989)
* update HostGuard implementation

* update/add tests for new HostGuard implementation
2019-07-19 10:28:43 +06:00
6b7df6b242 prep actix-web release 2019-07-18 17:51:51 +06:00
b6ff786ed3 update dependencies 2019-07-18 17:50:10 +06:00
9c3789cbd0 revert DateServiceInner changes 2019-07-18 17:37:41 +06:00
29098f8397 Add support for downcasting response errors (#986)
* Add support for downcasting response errors

* Added test for error casting
2019-07-18 17:25:50 +06:00
fbdda8acb1 Unix domain sockets (HttpServer::bind_uds) #92 2019-07-18 17:24:12 +06:00
d03296237e Log error results in Logger middleware (closes #938) (#984)
* Log error results in Logger middleware (closes #938)

* Log internal server errors with an ERROR log level

* Logger middleware: don't log 500 internal server errors, as Actix now logs them always

* Changelog
2019-07-18 14:31:18 +06:00
b36fdc46db Remove several usages of 'unsafe' (#968)
* Replace UnsafeCell in DateServiceInner with Cell

The previous API was extremely dangerous - calling `get_ref()`
followed by `reset()` would trigger instant UB, without requiring
any `unsafe` blocks in the caller.

By making DateInner `Copy`, we can use a normal `Cell` instead
of an `UnsafeCell`. This makes it impossible to cause UB (or even panic)
with the API.

* Split unsafe block HttpServiceHandlerResponse

Also add explanation of the safety of the usage of `unsafe`

* Replace UnsafeCell with RefCell in PayloadRef

This ensures that a mistake in the usage of 'get_mut' will cause
a panic, not undefined behavior.
2019-07-18 04:45:17 +06:00
2a2d7f5768 nightly clippy warnings 2019-07-17 15:53:51 +06:00
4092c7f326 clippy warnings 2019-07-17 15:08:30 +06:00
ef3e1037a8 bump version 2019-07-17 14:18:26 +06:00
baaa7b3fbb Replace ClonableService with local copy 2019-07-17 13:55:44 +06:00
32718b7e31 Expose factory traits and some clippy fixes (#983) 2019-07-17 12:58:42 +06:00
c01611d8b5 prepare actix-web release 2019-07-17 12:07:12 +06:00
7b1dcaffda cleanup deprecation warning for Box<dyn> 2019-07-17 11:44:39 +06:00
c65dbaf88e expose app's ResourceMap via resource_map method 2019-07-17 11:33:05 +06:00
c45728ac01 prep test server release 2019-07-16 10:21:52 +06:00
6f71409355 Add DELETE, PATCH, OPTIONS methods to TestServerRunner (#973) 2019-07-16 10:19:28 +06:00
8d17c8651f update bench link 2019-07-11 14:45:58 +06:00
b1143168e5 Impl Responder for (T, StatusCode) where T: Responder (#954) 2019-07-11 14:42:58 +06:00
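A sketch of the blanket impl in use (the handler and route are illustrative):

```rust
use actix_web::{http::StatusCode, web, App, HttpServer};

// Any Responder paired with a StatusCode overrides the response status.
fn create_item() -> (&'static str, StatusCode) {
    ("created", StatusCode::CREATED)
}

fn main() -> std::io::Result<()> {
    HttpServer::new(|| App::new().route("/items", web::post().to(create_item)))
        .bind("127.0.0.1:8080")?
        .run()
}
```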
69456991f6 update api doc example for client and add panic info for connection_info 2019-07-11 14:40:37 +06:00
f410f3330f prepare actix-session release 2019-07-08 23:25:51 +06:00
e1fcd203f8 Update the copyless version to 0.1.4 (#956)
Versions < 0.1.4 failed to check for null when allocating, which could lead to null dereferences.
2019-07-08 15:48:20 +06:00
0d8a4304a9 Drop a duplicated word (#958) 2019-07-05 20:46:55 +06:00
14cc5a5d6b Merge pull request #912 from Dowwie/master
updated actix-session to support login and logout functionality
2019-07-03 21:07:07 -04:00
287c2b1d18 Merge branch 'master' into master 2019-07-03 18:50:19 -04:00
7596ab69e0 reverted actix-web/CHANGES.md 2019-07-03 08:55:29 -04:00
1fdd77bffa reworded session info in CHANGES 2019-07-03 07:56:50 -04:00
2d424957fb updated version in Cargo to 0.2 2019-07-03 07:50:45 -04:00
dabc4fe00b updated actix-session/CHANGES with info 2019-07-03 07:50:11 -04:00
5bf5b0acd2 updated CHANGES with info about actix-session update 2019-07-03 07:46:46 -04:00
099a8ff7d8 updated session cookie to support login, logout, changes 2019-07-01 15:26:19 -04:00
a28b7139e6 prepare awc release 2019-07-01 11:34:57 +06:00
a0a469fe85 disable travis cargo cache 2019-07-01 11:33:11 +06:00
dbab55dd6b Bump rand crate version to 0.7 (#951) 2019-07-01 09:37:03 +06:00
d2eb1edac3 Actix-web client: Always append a colon after username in basic auth (#949)
* Always append a colon after username in basic auth

* Update CHANGES.md
2019-07-01 09:34:42 +06:00
5901dfee1a Fix link to actix-cors (#950) 2019-06-30 21:30:04 +06:00
0e05b37082 updated cookie session to update on change 2019-06-29 14:24:02 -04:00
37f4ce8604 Fixes typo in docs. (#948)
Small typo in docs.
2019-06-29 10:38:16 +06:00
12b5174850 update deps 2019-06-28 14:46:26 +06:00
b77ed193f7 prepare actix-web release 2019-06-28 14:41:56 +06:00
d286ccb4f5 Add on-connect callback #946 2019-06-28 14:34:26 +06:00
cac162aed7 update actix-http changes 2019-06-28 12:34:43 +06:00
a3a78ac6fb Do not set Content-Length header, let actix-http set it #930 2019-06-28 11:42:20 +06:00
596483ff55 prepare actix-web-actors release 2019-06-28 10:54:23 +06:00
768859513a Expose the max limit for payload sizes in Websocket Actors. #925 (#933)
* Expose the max limit for payload sizes in Websocket Actors.

* Revert to previous not-formatted code.

* Implement WebsocketContext::with_codec and make Codec Copy and Clone.

* Fix formatting.

* Fix formatting.
2019-06-28 10:49:03 +06:00
44bb79cd07 Call req.path() on Json extractor error only (#945)
* Call req.path() on Json extractor error only

* Cleanup len parse code
2019-06-28 10:44:53 +06:00
af9fb5d190 Support asynchronous data factories #850 2019-06-28 10:43:52 +06:00
50a9d9e2c5 Merge branch 'master' into master 2019-06-27 06:38:13 -04:00
c0c71f82c0 Fixes typo. (#940)
Small typo fix.
2019-06-25 23:23:36 +06:00
93855b889a Merge branch 'master' into master 2019-06-24 18:41:48 -04:00
fa7e0fe6df updated cookie.rs req to get_changes 2019-06-24 18:40:14 -04:00
b948f74b54 Extractor configuration Migration (#937)
added guide for Extractor configuration in MIGRATION.md
2019-06-24 07:16:04 +06:00
1a24ff8717 Add builder function for HTTP 429 Too Many Requests status (#931) 2019-06-21 13:06:29 +06:00
47fab0e393 Bump derive_more crate version to 0.15.0 in actix-cors (#927) 2019-06-19 16:41:42 +06:00
313ac48765 Use encoding_rs crate instead of unmaintained encoding crate (#922)
* Use encoding_rs crate instead of unmaintained encoding crate

* Update changelog
2019-06-18 12:43:25 +06:00
d7780d53c9 Fix typo in actix_web::web::Data::get_ref docstring (#921) 2019-06-18 07:27:23 +06:00
ad0e6f73b3 update version 2019-06-17 12:35:00 +06:00
546a8a58db remove cors and identity middlewares 2019-06-17 12:33:00 +06:00
acda1c075a prepare actix-web release 2019-06-17 12:23:30 +06:00
686e5f1595 update deps 2019-06-16 22:10:22 +06:00
d2b6502c7a prepare actix-http release 2019-06-16 21:59:22 +06:00
7c0f570845 Do not compress NoContent (204) responses #918 2019-06-16 21:54:17 +06:00
382d4ca216 Merge branch 'master' into master 2019-06-15 22:21:39 +06:00
eaa371db8b update migration 2019-06-15 22:20:46 +06:00
d293ae2a69 fix nested resource map registration #915 2019-06-15 22:12:20 +06:00
d7ec241fd0 re-export identity and cors middleware 2019-06-15 21:47:06 +06:00
cd323f2ff1 Move cors middleware to actix-cors crate 2019-06-15 09:34:16 +06:00
32a66a99bf reverting change to get_session due to side effects 2019-06-13 09:19:03 -04:00
73ae801a13 Merge branch 'master' of https://github.com/Dowwie/actix-web 2019-06-13 09:00:45 -04:00
ca4ed0932e made Session::get_session public 2019-06-13 08:59:59 -04:00
bf48798bce Content-Length is 0 for NamedFile HEAD request #914 2019-06-13 15:27:21 +06:00
9fc7c8b1af Merge branch 'master' into master 2019-06-12 23:53:36 +06:00
c8118e8411 fix path doc tests 2019-06-12 20:12:15 +06:00
65732197b8 modified so as to consider unanticipated state changes 2019-06-12 10:11:38 -04:00
959eef05ae updated actix-session to support login and logout functionality (renew and purge) 2019-06-12 08:03:27 -04:00
e7ba67e1a8 rename PathPayloadError and test for path config 2019-06-12 17:02:45 +06:00
13e618b128 Added initial support for PathConfig, allows setting custom error handler. (#903) 2019-06-12 16:49:56 +06:00
36e6f0cb4b add "put" and "sput" methods for test server (#909) 2019-06-12 16:47:00 +06:00
7450ae37a7 Re-apply patch from #637 #894 2019-06-12 16:45:05 +06:00
2ffda29f9b Allow to test an app that uses async actors #897 2019-06-12 16:15:06 +06:00
ff724e239d move identity service separate crate 2019-06-12 15:52:48 +06:00
Bob
ee769832cf get_identity from HttpMessage (#908)
* get_identity from HttpMessage

* more doc for RequestIdentity
2019-06-12 09:26:46 +06:00
c4b7980b4f Upgraded actix-web dependency and set default-features to false (#895) 2019-06-07 09:34:56 +06:00
bfbac4f875 Upgraded actix-web dependency and set default-features to false (#900) 2019-06-07 09:34:30 +06:00
53e2f8090f Mark default enabled package features in the docs (#890) 2019-06-06 11:14:56 +06:00
e399e01a22 update readme 2019-06-05 09:02:44 +06:00
d9a62c4bbf add App::register_data() 2019-06-05 08:43:39 +06:00
a548b69679 fmt 2019-06-05 08:43:13 +06:00
ae64475d98 test-server release 2019-06-05 08:27:25 +06:00
a342b1289d prep awc release 2019-06-05 08:14:00 +06:00
38f04b75a7 update deps 2019-06-04 22:36:10 +06:00
a771540b16 prepare actix-web-codegen release 2019-06-04 22:33:43 +06:00
cf217d35a8 Added HEAD, CONNECT, OPTIONS and TRACE to the codegen (#886)
* Added HEAD, CONNECT, OPTIONS and TRACE to the codegen

* Add new macros to use statement

* Add patch to supported codegen http methods

* Update CHANGES.md

Added head, options, trace, connect and patch codegen changes to CHANGES.md
2019-06-04 22:30:43 +06:00
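A sketch of the expanded attribute set, assuming the codegen macros are re-exported from the actix-web crate root as in released versions (the paths and handlers are illustrative):

```rust
use actix_web::{head, options, App, HttpResponse, HttpServer};

#[head("/resource")]
fn head_resource() -> HttpResponse {
    HttpResponse::Ok().finish()
}

#[options("/resource")]
fn options_resource() -> HttpResponse {
    HttpResponse::NoContent().finish()
}

fn main() -> std::io::Result<()> {
    HttpServer::new(|| App::new().service(head_resource).service(options_resource))
        .bind("127.0.0.1:8080")?
        .run()
}
```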
0e138e111f add external resource support on scope level 2019-06-03 23:41:32 +06:00
1fce4876f3 Scope configuration (#880)
* WIP: Scope configuration

* Extensions: Fix into_iter()

* Scope: Fix tests

* Add ScopeConfig to web

Committing from mobile, if this doesn't look good it's because I haven't tested it...

* Scope Config: Use ServiceConfig instead

* Scope: Switch to ServiceConfig in doc

* ScopeConfig: Remove unnecessary changes, handle the case when data is empty

* ScopeConfig: Remove changes from actix-http
2019-06-03 23:12:37 +06:00
4a179d1ae1 prepare actix-session release 2019-06-03 10:52:43 +06:00
a780ea10e9 Guard cookie mod by cookie-session feature (#883)
Signed-off-by: Igor Gnatenko <i.gnatenko.brain@gmail.com>
2019-06-03 10:30:30 +06:00
6d2e190c8e prepare actix-files release 2019-06-02 13:09:21 +06:00
b1cfbdcf7a prepare actix-http release 2019-06-02 13:05:22 +06:00
24180f9014 Fix boundary parsing #876 2019-06-02 12:58:37 +06:00
15cdc680f6 Static files are incorrectly served as both chunked and with length #812 2019-06-01 17:57:40 +06:00
666756bfbe body helpers 2019-06-01 17:57:25 +06:00
a1b40f4314 add license files 2019-06-01 17:25:29 +06:00
29a0fe76d5 prepare actix-web-codegen release 2019-06-01 17:21:22 +06:00
7753b9da6d web-codegen: Add extra-traits to syn features (#879)
```rust
error[E0277]: `syn::attr::NestedMeta` doesn't implement `std::fmt::Debug`
   --> src/route.rs:149:57
    |
149 |                 attr => panic!("Unknown attribute{:?}", attr),
    |                                                         ^^^^ `syn::attr::NestedMeta` cannot be formatted using `{:?}` because it doesn't implement `std::fmt::Debug`
    |
    = help: the trait `std::fmt::Debug` is not implemented for `syn::attr::NestedMeta`
    = note: required because of the requirements on the impl of `std::fmt::Debug` for `&syn::attr::NestedMeta`
    = note: required by `std::fmt::Debug::fmt`
```
2019-06-01 14:13:45 +06:00
f1764bba43 Fix Logger time format (use rfc3339) (#867)
* Fix Logger time format (use rfc3339)

* Update change log
2019-05-31 12:09:21 +04:00
c2d7db7e06 prepare actix-web-actors release 2019-05-29 16:22:57 -07:00
21418c7414 prep actix-http release 2019-05-29 16:15:12 -07:00
fe781345d5 Add Migration steps for Custom Error (#869)
Adds migration steps for custom error in 1.0
2019-05-29 20:47:04 +04:00
a614be7cb5 Don't DISCONNECT from stream when reader is empty (#870)
* Don't DISCONNECT from stream when reader is empty

* Fix chunked transfer: poll_request before closing stream + Test
2019-05-29 20:37:42 +04:00
1eb89b8375 remove debug prints 2019-05-25 03:16:53 -07:00
aa626a1e72 handle disconnects 2019-05-25 03:16:46 -07:00
7f12b754e9 Handle socket read disconnect 2019-05-25 03:07:40 -07:00
3f196f469d update version 2019-05-25 02:13:04 -07:00
35eb378585 prepare actix-files release 2019-05-25 02:02:28 -07:00
6db625f55b Update actix-web dep to 1.0.0-rc (#864) 2019-05-25 01:52:23 -07:00
801cc2ed5d Cleaned unnecessary Option<_> around ServerBuilder in server.rs/HttpServer (#863) 2019-05-23 05:21:02 -07:00
ded1e86e7e Add ServiceRequest::set_payload() method 2019-05-22 21:25:51 -07:00
babf48c550 fix NamedFile last-modified check #820 2019-05-22 21:21:12 -07:00
d3e807f6e9 move Payload to inner http request 2019-05-22 11:49:27 -07:00
7746e785c1 re-export Service and Transform traits 2019-05-22 11:20:37 -07:00
4e141d7f5d Merge branch 'master' of github.com:actix/actix-web 2019-05-22 11:18:42 -07:00
12842871fe Clear http requests pool on app service drop #860 2019-05-22 11:18:33 -07:00
fc85ae4014 small documentation fix (#856) 2019-05-21 10:43:18 -07:00
5826f39dbe Add set_json method to TestRequest (#851)
- Takes a type which implements serde::Serialize, serializes it to JSON,
and sets it as the payload. The content-type is also set to JSON.
2019-05-18 19:36:28 -07:00
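A sketch of the new test helper (assuming `serde_json` is available; the endpoint and payload are illustrative):

```rust
#[cfg(test)]
mod tests {
    use actix_web::test::TestRequest;
    use serde_json::json;

    #[test]
    fn builds_json_request() {
        // set_json serializes the value into the body and sets
        // Content-Type: application/json on the test request.
        let req = TestRequest::post()
            .uri("/api/items")
            .set_json(&json!({ "name": "example" }))
            .to_request();

        assert_eq!(req.path(), "/api/items");
    }
}
```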
8ff56d7cd5 prepare actix-session release 2019-05-18 11:20:09 -07:00
0843bce7ba prepare actix-multipart 2019-05-18 11:15:58 -07:00
dea0e0a721 update actix-server dep 2019-05-18 11:00:33 -07:00
e857ab1f81 HttpServer::shutdown_timeout u16 to u64 (#849)
Increases the maximum graceful shutdown time beyond the roughly 18-hour limit imposed by a u16 number of seconds.

For issue #848.
2019-05-18 10:50:35 -07:00
0dda4b06ea prepare release 2019-05-18 10:49:59 -07:00
cbe0226177 update changes 2019-05-18 10:47:08 -07:00
e8c8626878 update deps 2019-05-18 09:54:23 -07:00
4b215e0839 Support Query<T>::from_query() (#846) 2019-05-17 13:10:46 -07:00
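A sketch of parsing a raw query string outside a request context (the parameter struct is illustrative):

```rust
use actix_web::web::Query;
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct Params {
    id: u32,
    name: String,
}

fn main() {
    // from_query deserializes straight from the query string.
    let params = Query::<Params>::from_query("id=7&name=actix").unwrap();
    println!("{} {}", params.id, params.name);
}
```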
e1ff3bf8fa fix resource match with params #841 2019-05-15 10:31:40 -07:00
80f4ef9aac When using codegen with paths that have parameters then only the first endpoint resolves (#842) 2019-05-15 09:21:07 -07:00
bba90d7f22 Query config (#839)
* add QueryConfig

* expose QueryConfig in web module

* fmt

* use associated type for QueryConfig

* update CHANGES.md
2019-05-14 13:54:30 -07:00
f8af3b86e5 export set_date 2019-05-14 08:48:11 -07:00
6c3d8b8738 Make JsonConfig send (#830)
* replace Rc with Arc

* add Send trait requirement for Fn in JsonConfig error handler

* add Sync trait requirement for Fn in JsonConfig error handler

* use associated type inside JsonConfig

* fix lint: members in the impl has the same order in the trait

* Update CHANGES.md
2019-05-12 20:04:08 -07:00
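A hedged sketch of the configuration this change concerns, in the 1.0-era style of attaching `JsonConfig` as resource data (the limit, route and error response are illustrative); the error handler closure can now be shared across worker threads because it only needs to be `Send + Sync`:

```rust
use actix_web::{error, web, App, HttpResponse, HttpServer};
use serde::Deserialize;

#[derive(Deserialize)]
struct Info {
    name: String,
}

fn index(info: web::Json<Info>) -> String {
    format!("hello {}", info.name)
}

fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().service(
            web::resource("/")
                .data(
                    web::JsonConfig::default()
                        .limit(4096)
                        // Turn deserialization failures into a 400 response.
                        .error_handler(|err, _req| {
                            error::InternalError::from_response(
                                err,
                                HttpResponse::BadRequest().finish(),
                            )
                            .into()
                        }),
                )
                .route(web::post().to(index)),
        )
    })
    .bind("127.0.0.1:8080")?
    .run()
}
```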
5a90e33bcc update deps 2019-05-12 12:01:24 -07:00
86b569e320 version 2019-05-12 11:56:01 -07:00
2350a2dc68 Handle cancellation of uploads #834 #736 2019-05-12 11:43:05 -07:00
36d017dcc6 update deps 2019-05-12 11:41:43 -07:00
3bb081852c prep actix-session release 2019-05-12 10:53:21 -07:00
1ca58e876b prepare beta4 release 2019-05-12 10:49:21 -07:00
e9cbcbaf03 update dependencies 2019-05-12 10:18:02 -07:00
07c9eec803 prepare awc release 2019-05-12 10:04:38 -07:00
beae9ca0f7 update changes 2019-05-12 09:57:16 -07:00
07b9707ca1 prepare actix-http release 2019-05-12 09:56:55 -07:00
45c05978b0 Allow to set/override app data on scope level 2019-05-12 09:42:05 -07:00
df08baf67f update actix-net dependencies 2019-05-12 08:34:51 -07:00
4066375737 Update CHANGES.md 2019-05-10 14:45:30 -07:00
a77b0b054a Make App::configure take an FnOnce (#825) 2019-05-10 14:44:49 -07:00
a17ff492a1 fix formatting 2019-05-04 22:18:59 -07:00
33b4c05557 add payload stream migration entry 2019-05-04 22:18:02 -07:00
005c055a7f prepare actix-web release 2019-05-04 20:05:20 -07:00
3d1af19080 prepare actix-http release 2019-05-04 19:51:13 -07:00
fa78da8156 unify route and app data, it allows to provide global extractor config #775 2019-05-04 19:43:49 -07:00
01cfcf3b75 update changes 2019-05-04 08:42:27 -07:00
7ef4f5ac0b Make request headers optional in CORS preflight (#816) 2019-05-04 08:41:37 -07:00
fc19ce41c4 Clean up response extensions in response pool #817 2019-05-03 15:26:34 -07:00
6e00eef63a awc: Fix typo on ResponseError documentation (#815)
* awc: Fix typo on ResponseError documentation

Signed-off-by: Otavio Salvador <otavio@ossystems.com.br>

* http: Fix typo on ResponseError documentation

Signed-off-by: Otavio Salvador <otavio@ossystems.com.br>

* http: Expand type names for openssl related errors documentation

Signed-off-by: Otavio Salvador <otavio@ossystems.com.br>
2019-05-03 14:30:00 -07:00
337c2febe3 add more tests 2019-05-02 09:49:10 -07:00
f27beab016 fix case for transfer-encoding header name 2019-05-02 09:30:00 -07:00
4f1c6d1bb7 Update MIGRATION.md (#811) 2019-05-02 09:26:51 -07:00
6b34909537 Fix NormalizePath middleware impl #806 2019-05-01 12:40:56 -07:00
87284f0951 Add doctest to verify NormalizePath middleware (#809) 2019-05-01 11:47:51 -07:00
24bd5b1344 update readmes 2019-04-29 20:47:21 -07:00
94a0d1a6bc remove old api doc refs 2019-04-29 18:42:21 -07:00
f4e1205cbb fix reactor drop panic 2019-04-29 10:14:08 -07:00
d2c1791067 add async handler test with blocking call 2019-04-29 09:45:37 -07:00
f4b4875cb1 Add helper function for executing futures test::block_fn() 2019-04-29 09:34:14 -07:00
29a841529f Allow to construct Data instances to avoid double Arc for Send + Sync types. 2019-04-29 09:26:12 -07:00
b51b5b763c added clarification to docs regarding middleware processing sequence, added delete method to TestRequest (#799)
* added clarification to docs regarding middleware processing sequence

* added delete method to TestRequest, doc, and test
2019-04-29 09:14:36 -07:00
8db6b48a76 update version 2019-04-28 09:09:18 -07:00
ffd2c04cd3 Add helper trait UserSession which allows to get session for ServiceRequest and HttpRequest 2019-04-28 09:08:51 -07:00
70a4c36496 use Error explicitly 2019-04-25 11:14:32 -07:00
cba78e06ae update changes 2019-04-24 15:42:34 -07:00
3b3dbb4f40 add raw services support 2019-04-24 15:29:15 -07:00
7300002226 grammar fixes (#796) 2019-04-24 13:21:42 -07:00
5426413cb6 update dependencies 2019-04-24 13:00:30 -07:00
2bc937f6c3 prepare release 2019-04-24 12:50:44 -07:00
60fa0d5427 Store visit and login timestamp in the identity cookie (#502)
This allows verifying the time of login or last visit, thereby limiting the danger of leaked
cookies.
2019-04-24 12:49:56 -07:00
f429d3319f Read until eof for http/1.0 responses #771 2019-04-24 11:57:40 -07:00
2e19f572ee add tests for camel case headers rendering 2019-04-24 11:27:57 -07:00
64f603b076 Support to set header names of ClientRequest as Camel-Case (#713)
* Support to set header names of `ClientRequest` as Camel-Case

This supports making requests to servers which don't perfectly implement
`RFC 7230`. It is important for an app which uses `ClientRequest` as a
core part.

* Add field `upper_camel_case_headers` to `ClientRequest`.

* Add function `set_upper_camel_case_headers` to `ClientRequest`
  and `ClientRequestBuilder` to set field `upper_camel_case_headers`.

* Add trait `client::writer::UpperCamelCaseHeader` for
  `http::header::HeaderName`, so it can be converted to Camel-Case and
  then written to the buffer.

* Add test `test_client::test_upper_camel_case_headers`.

* Support upper Camel-Case headers

* [actix-http] Add field `upper_camel_case_headers` for `RequestHead`
* [actix-http] Add code for `MessageType` to support upper camel case
* [awc] Add functions for `ClientRequest` to set upper camel case

* Use `Flags::CAMEL_CASE` for upper camel case of headers
2019-04-24 10:48:49 -07:00
679d1cd513 allow to override responder's status code and headers 2019-04-24 10:25:46 -07:00
42644dac3f prepare actix-http-test release 2019-04-24 07:31:33 -07:00
898ef57080 Fix async web::Data factory handling 2019-04-23 21:21:49 -07:00
9702b2d88e add client h2 reuse test 2019-04-23 15:06:30 -07:00
d2b0afd859 Fix http client pool and wait queue management 2019-04-23 14:57:03 -07:00
5f6a1a8249 update version 2019-04-23 09:45:39 -07:00
5d531989e7 Fix BorrowMutError panic in client connector #793 2019-04-23 09:42:19 -07:00
3532602299 Added support for remainder match (i.e /path/{tail}*) 2019-04-22 21:22:17 -07:00
48bee55087 .to_async() handler can return Responder type #792 2019-04-22 14:22:08 -07:00
d00c9bb844 do not consume boundary 2019-04-21 16:14:09 -07:00
895e409d57 Optimize multipart handling #634, #769 2019-04-21 15:41:01 -07:00
f0789aad05 update dep versions 2019-04-21 09:03:46 -07:00
7e480ab2f7 beta.1 release 2019-04-20 21:16:51 -07:00
891f857547 update changes 2019-04-20 11:18:04 -07:00
01b1350dcc update versions 2019-04-19 18:16:01 -07:00
5e4e95fb0a update create version 2019-04-19 18:13:05 -07:00
9f421b81b8 fix non-ssl connector 2019-04-19 18:10:53 -07:00
6decfdda1f update deps 2019-04-19 18:06:34 -07:00
fc9b14a933 allow to specify server address for http and ws requests 2019-04-19 18:03:44 -07:00
7292d0b696 drop chrono and use i64 for max age 2019-04-19 17:23:17 -07:00
a3844c1bfd update version 2019-04-19 13:55:36 -07:00
791f22bbc8 replace time::Duration with chrono::Duration and add max_age_time method (#789)
* feat: replace time::Duration with chrono::Duration

* feat: rename the max_age method which accepts `Duration` to max_age_time and add a new max_age method accepting seconds as an `isize`

* feat: replace `time::Duration` with `chrono::Duration` in the `actix-http` repo
2019-04-19 13:54:44 -07:00
1e7f97a111 Add Normalization middleware for in place (#783) 2019-04-19 13:53:49 -07:00
bc40f5ae40 Merge pull request #788 from Dowwie/master
added put and patch to TestRequest, docs, and test
2019-04-19 06:55:44 -04:00
3504a8fc0a Merge branch 'master' into master 2019-04-19 04:38:41 -04:00
bfe0df5ab0 update tests 2019-04-18 21:28:23 -07:00
ed94df189f Merge branch 'master' into master 2019-04-18 19:03:48 -04:00
aa255298ef make ServiceRequest::from_parts private, as it is not safe to create from parts 2019-04-18 16:03:13 -07:00
da86b6e062 added put and patch to TestRequest, docs, and test 2019-04-18 18:06:32 -04:00
75e340137d use local version of http-test 2019-04-18 12:23:56 -07:00
e659e09e29 update tests 2019-04-18 11:01:04 -07:00
163ca89cf4 more tests 2019-04-17 17:48:25 -07:00
85b598a614 add cookie session test 2019-04-17 11:02:03 -07:00
b64851c5ec enable runtime for test:: methods 2019-04-17 10:28:27 -07:00
cc8420377e pass request ownership to closure instead of ref 2019-04-16 15:43:55 -07:00
5740f1e63a prepare actix-framed release 2019-04-16 11:18:47 -07:00
c943e95812 update dependencies 2019-04-16 11:17:29 -07:00
4c0ebd55d3 prepare actix-http-test release 2019-04-16 11:02:26 -07:00
e7ec77aa81 update readme 2019-04-16 10:50:37 -07:00
ddfd7523f7 prepare awc release 2019-04-16 10:49:38 -07:00
2986077a28 no need for feature 2019-04-16 10:32:48 -07:00
3744957804 actix_http::encoding always available 2019-04-16 10:27:58 -07:00
420d3064c5 Add .peer_addr() #744 2019-04-16 10:11:38 -07:00
eb4f6b74fb Merge branch 'master' of github.com:actix/actix-web 2019-04-16 09:58:07 -07:00
a116c4c2c7 Expose peer addr via Request::peer_addr() and RequestHead::peer_addr 2019-04-16 09:54:02 -07:00
7f674febb1 add 422 to httpcodes.rs (#782) 2019-04-15 16:55:06 -07:00
14252f5ef2 use test::call_service 2019-04-15 09:09:21 -07:00
7a28b32f6d Rename test::call_success to test::call_service 2019-04-15 07:44:07 -07:00
09cdf1e302 Rename RouterConfig to ServiceConfig 2019-04-15 07:32:49 -07:00
1eebd47072 fix warnings 2019-04-14 21:00:16 -07:00
002c41a7ca update trust-dns 2019-04-14 20:45:44 -07:00
ab4fda6084 update tests 2019-04-14 20:20:33 -07:00
f9078d41cd add test::read_response; fix TestRequest::app_data() 2019-04-14 19:52:12 -07:00
4cc2b38059 added read_response_json for testing (#776)
* added read_response_json for testing

* cleaned up

* modified docs for read_response_json

* typo in doc

* test code in doc should compile now

* use type coercion in doc

* removed generic R, replaced with Request
2019-04-14 16:25:45 -07:00
d7040dc303 alpha.6 release 2019-04-14 08:09:32 -07:00
6bc1a0c76b Do not set default headers for websocket request 2019-04-14 07:43:53 -07:00
5bd5651faa Allow to use any service as default service 2019-04-13 22:25:00 -07:00
32ac159ba2 update migration 2019-04-13 16:51:41 -07:00
ee33f52736 make extractor config type explicit 2019-04-13 16:35:25 -07:00
4f30fa9d46 Remove generic type for request payload, always use default 2019-04-13 14:50:54 -07:00
043f6e77ae remove nested multipart support 2019-04-13 10:11:07 -07:00
48518df883 do not generate all docs; use docs.rs for 1.0 docs 2019-04-13 09:35:23 -07:00
1f2b15397d prepare alpha5 release 2019-04-12 14:00:45 -07:00
87167f6581 update actix-connect 2019-04-12 12:33:11 -07:00
b4768a8f81 add TestRequest::run(), allows to run async functions 2019-04-12 11:28:57 -07:00
3fb7343e73 provide during test request construction 2019-04-12 11:22:18 -07:00
5cfba5ff16 add FramedRequest builder for testing 2019-04-12 11:15:58 -07:00
67c34a5937 Add Debug impl for BoxedSocket 2019-04-11 16:01:54 -07:00
94d7a7f873 custom future for SendError service 2019-04-11 15:12:23 -07:00
d86567fbdc revert SendResponse::Error type 2019-04-11 14:18:58 -07:00
d115b3b3ed ws verifyciation takes RequestHead; add SendError utility service 2019-04-11 14:00:32 -07:00
6420a2fe1f update client example 2019-04-10 20:57:18 -07:00
0eed9e5257 add more migration 2019-04-10 20:51:57 -07:00
7801fcb993 update migration 2019-04-10 20:47:28 -07:00
e55be4dba6 add FramedRequest helper methods 2019-04-10 19:57:34 -07:00
12e1dad42e export TestBuffer 2019-04-10 19:43:09 -07:00
7cd59c38d3 rename framed App 2019-04-10 18:08:28 -07:00
8dc4a88aa6 add actix-framed 2019-04-10 15:06:27 -07:00
52aebb3bca fmt 2019-04-10 15:05:03 -07:00
6b42b2aaee remove framed for now 2019-04-10 12:55:56 -07:00
6ab9838977 added some error logging for extractors: Data, Json, Query, and Path (#765)
* added some error logging for extractors

* changed log::error to log::debug and fixed position of log for path

* added request path to debug logs
2019-04-10 12:45:13 -07:00
9d82d4dfb9 Fix body propagation in Response::from_error. #760 2019-04-10 12:43:31 -07:00
9bb40c249f add h1::SendResponse future; renamed to MessageBody::size 2019-04-10 12:24:17 -07:00
046b7a1425 Expand codegen to allow specify guards and async 2019-04-10 15:43:18 +03:00
c22a3a71f2 fix test 2019-04-08 19:07:11 -07:00
9c9940d88d update readme 2019-04-08 17:53:19 -07:00
561f83d044 add upgrade service support to h1 dispatcher 2019-04-08 17:51:14 -07:00
43d325a139 allow to specify upgrade service 2019-04-08 14:51:16 -07:00
0a6dd0efdf fix compression tests 2019-04-08 12:48:39 -07:00
b921abf18f set host header for http1 connections 2019-04-08 12:48:26 -07:00
9bcd5d6664 updated legacy code in call_success example (#762) 2019-04-08 11:20:46 -07:00
bc58dbb2f5 add async expect service test 2019-04-08 11:19:56 -07:00
b1547bbbb6 do not set default headers 2019-04-08 11:09:57 -07:00
a7fdac1043 fix expect service registration and tests 2019-04-08 10:31:29 -07:00
53da55aa3c alpha4 release 2019-04-07 23:42:05 -07:00
aa78565453 use objects pool for HttpRequest; optimize nested services call 2019-04-07 23:06:21 -07:00
75b213a6f0 refactor FromRequest trait 2019-04-07 14:43:07 -07:00
3c650ca194 remove buffer capacity for payload 2019-04-07 10:40:45 -07:00
219baf3323 remove PayloadWriter trait 2019-04-07 10:29:26 -07:00
ec09d6fbe6 optimize encode headers and body split 2019-04-07 10:03:38 -07:00
68d2203dd6 run travis with stable rust only 2019-04-07 08:17:29 -07:00
748289f0ff use custom headers map; more optimizations 2019-04-06 15:02:02 -07:00
4ef46e26f9 Merge branch 'master' of github.com:actix/actix-web 2019-04-06 08:13:14 -07:00
3872d3ba5a refactor h1 dispatcher 2019-04-06 08:12:58 -07:00
b1523ab78c started 1.0 migration guide (#758) 2019-04-06 07:39:20 -07:00
fbedaec661 add expect: 100-continue support #141 2019-04-05 16:46:44 -07:00
02fcaca3da add backward compatibility 2019-04-05 11:36:26 -07:00
18593d8476 updated Connector docs and renamed service() to finish() (#757)
* added Connector to actix-web::client namespace

* updated Connector, renaming service() to finish() and adding docs

* added doc for finish method on Connector
2019-04-05 11:34:27 -07:00
b6dacaa23a remove SendError and SendResponse services 2019-04-05 11:29:42 -07:00
f89321fd01 fix import 2019-04-05 10:50:11 -07:00
0d4a8e1b1c update actix-connect 2019-04-05 10:35:14 -07:00
162cd3eecd added Connector to actix-web::client namespace (#756) 2019-04-05 07:37:00 -07:00
a655bdac52 Fix clippy warning (#755) 2019-04-05 12:34:24 +03:00
309c480782 encoder sent uncompressed data before compressed 2019-04-04 15:03:40 -07:00
9c205f9f1d update tests for content-encoding 2019-04-04 14:00:56 -07:00
1f5c0f50f9 Add minimal std::error::Error impl for Error 2019-04-04 13:23:38 -07:00
d8bc66a18e Use thread pool for response body compression 2019-04-04 13:17:55 -07:00
bc834f6a03 remove some static contraints 2019-04-04 10:59:34 -07:00
dc7c3d37a1 upgrade router 2019-04-03 21:45:30 -07:00
1e2bd68e83 Render error and return as response body 2019-04-03 19:55:19 -07:00
954fe21751 set response error body 2019-04-03 19:07:25 -07:00
7d6085ddbd Add %U (URLPath) for logger (#752)
* Add %R (Route) for logger

* Requested Updates (Route => URLPath, %R => %U)
2019-04-03 17:41:42 -07:00
cef3dc3586 added app_data() method 2019-04-03 15:25:52 -07:00
237bfba1ed add App::configure() - allow to offload app configuration to different methods 2019-04-03 15:09:31 -07:00
dfa0abf5a5 Export IntoHeaderValue 2019-04-03 12:44:47 -07:00
e738361e09 move multipart support to separate crate 2019-04-03 12:28:58 -07:00
f56072954b remove PayloadBuffer 2019-04-03 03:20:20 -07:00
2a89b995aa do not cleanup travis build 2019-04-02 21:56:38 -07:00
442f5057dd alpha.3 release 2019-04-02 21:49:31 -07:00
19eef36f8f Merge branch 'tarpaulin' 2019-04-02 21:11:03 -07:00
51d5006ccf Detect socket disconnection during protocol selection 2019-04-02 20:50:25 -07:00
3aebe09e5c travis 2019-04-02 19:21:22 -07:00
4227cddd30 fix dev dependencies 2019-04-02 15:00:10 -07:00
db1f7651a3 more patch cratesio 2019-04-02 14:47:59 -07:00
00000fb316 mut obj 2019-04-02 14:27:54 -07:00
f100976ef0 rename close_connection to force_close 2019-04-02 14:08:30 -07:00
deac983bc7 fix test-server workspace setup 2019-04-02 14:04:28 -07:00
bca31eb7ad remove Deref 2019-04-02 13:35:01 -07:00
e282ef7925 return back consuming builder 2019-04-02 12:51:16 -07:00
49a499ce74 properly allocate read buffer 2019-04-02 11:11:32 -07:00
d067b1d5f1 do not use static 2019-04-02 10:53:44 -07:00
c27fbdc35f Preallocate read buffer for h1 codec, #749 2019-04-02 10:19:56 -07:00
1bd0995d7a remove unneeded & 2019-04-01 18:00:38 -07:00
2d43489278 ClientRequest::json() accepts reference instead of object 2019-04-01 17:53:30 -07:00
89a0a50e14 Merge branch 'master' of github.com:actix/actix-web 2019-04-01 15:20:04 -07:00
38afc93304 Use non-consuming builder pattern for ClientRequest 2019-04-01 15:19:34 -07:00
03c84be1f2 Merge pull request #750 from Dowwie/master
added docs for wrap and wrap_fn
2019-04-01 17:37:04 -04:00
6d169f4c9c Merge branch 'master' of https://github.com/Dowwie/actix-web 2019-04-01 15:10:49 -04:00
3dd3f7bc92 updated scope wrap doc 2019-04-01 15:10:28 -04:00
e6936d9f73 Merge branch 'master' into master 2019-04-01 14:53:23 -04:00
03dfbdfcdd updated wrap and wrap fn descriptions, still requiring viable examples 2019-04-01 14:52:05 -04:00
5c4e4edda4 add ClientResponse::json() 2019-04-01 11:51:18 -07:00
c5fa6c1abe do not consume response 2019-04-01 11:29:50 -07:00
6c195d8521 add Deref<Target=RequestHead> for ClientRequest 2019-04-01 10:26:25 -07:00
96fd61f3d5 rust 1.31.0 compatibility 2019-04-01 10:26:09 -07:00
8800b8ef13 mentioned re-use in wrap doc 2019-04-01 09:59:21 -04:00
220c04b7b3 added docs for wrap and wrap_fn 2019-04-01 09:30:11 -04:00
34695f4bce rename test methods; update tests 2019-03-31 20:43:00 -07:00
15c5a3bcfb fix test 2019-03-31 18:57:54 -07:00
ab45974e35 add default handler 2019-03-31 18:19:18 -07:00
e4b3f79458 allocate enough space 2019-03-31 17:05:02 -07:00
ce8294740e fix tests with disabled features 2019-03-31 17:04:34 -07:00
ddf5089bff Warn when an unsealed private cookie isn't valid UTF-8 (#746) 2019-03-31 16:26:56 +03:00
7596d0b7cb fix fn_guard doc string 2019-03-30 20:48:00 -07:00
1a871d708e update guard doc test 2019-03-30 12:13:21 -07:00
351df84cca update stable release api doc link 2019-03-30 11:37:56 -07:00
6fcbe4bcda add fn_guard 2019-03-30 11:33:31 -07:00
457b75c995 update api docs; move web to submodule 2019-03-30 10:04:38 -07:00
724e9c2efb replace deprecated fn 2019-03-30 07:56:09 -07:00
2e159d1eb9 test-server: Request functions should accept path (#743) 2019-03-30 07:53:45 -07:00
a20b9fd354 prepare aplha2 release 2019-03-29 22:06:14 -07:00
d846328f36 fork cookie crate 2019-03-29 21:13:39 -07:00
193f8fb2d9 update tests 2019-03-29 18:51:07 -07:00
3a954298d7 Merge branch 'master' of github.com:actix/actix-web 2019-03-29 18:23:07 -07:00
3220777ff9 Added ws::Message::Nop, no-op websockets message 2019-03-29 18:22:49 -07:00
00526f60dc Impl BodyEncoding for Response (#740) 2019-03-29 16:29:11 -07:00
c126713f40 add rustls support to HttpServer 2019-03-29 16:28:32 -07:00
e9bbde6832 allow to override request's uri 2019-03-29 16:28:28 -07:00
5eb3f1154e revert 2019-03-29 14:27:22 -07:00
aebeb511cd explicit impl traits for ws connect 2019-03-29 14:26:11 -07:00
744d82431d add per request timeout 2019-03-29 14:07:37 -07:00
058b1d56e6 Export ws sub-module with websockets related types 2019-03-29 13:49:21 -07:00
709475b2bb multipart::Field renamed to MultipartField 2019-03-29 11:59:38 -07:00
19a0b8046b remove actix reference 2019-03-29 11:13:36 -07:00
1e7096a63a add request timeout 2019-03-28 22:33:41 -07:00
ea4d98d669 Session wide headers, basic and bearer auth 2019-03-28 21:48:35 -07:00
3b897da8e2 Do not use thread pool for decompression if chunk size is smaller than 2048 2019-03-28 21:15:26 -07:00
10b166404e Merge branch 'master' of github.com:actix/actix-web 2019-03-28 20:27:59 -07:00
80ff7d40a1 enable awc/ssl if ssl features is enabled 2019-03-28 20:27:47 -07:00
c4a8bbe47b fix the example in README.md (#739) 2019-03-28 20:03:17 -07:00
9710e9b01f Re-export actix_http::client::Connector 2019-03-28 14:46:33 -07:00
1d79f16529 update release api docs link 2019-03-28 14:30:38 -07:00
670a457013 fix docs.rs feature list 2019-03-28 14:28:59 -07:00
878f32c495 fix tests for no-default-features 2019-03-28 14:27:07 -07:00
a2c9ff3a33 back to development 2019-03-28 14:10:03 -07:00
9c198a0d29 alpha.1 release 2019-03-28 13:46:26 -07:00
226 changed files with 27684 additions and 16048 deletions


@ -1,41 +0,0 @@
environment:
global:
PROJECT_NAME: actix-web
matrix:
# Stable channel
- TARGET: i686-pc-windows-msvc
CHANNEL: stable
- TARGET: x86_64-pc-windows-gnu
CHANNEL: stable
- TARGET: x86_64-pc-windows-msvc
CHANNEL: stable
# Nightly channel
- TARGET: i686-pc-windows-msvc
CHANNEL: nightly
- TARGET: x86_64-pc-windows-gnu
CHANNEL: nightly
- TARGET: x86_64-pc-windows-msvc
CHANNEL: nightly
# Install Rust and Cargo
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
install:
- ps: >-
If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
$Env:PATH += ';C:\msys64\mingw64\bin'
} ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
$Env:PATH += ';C:\MinGW\bin'
}
- curl -sSf -o rustup-init.exe https://win.rustup.rs
- rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
- rustc -Vv
- cargo -V
# 'cargo test' takes care of building for us, so disable Appveyor's build stage.
build: false
# Equivalent to Travis' `script` phase
test_script:
- cargo clean
- cargo test --no-default-features --features="flate2-rust"

.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file

@ -0,0 +1,37 @@
---
name: Bug Report
about: Create a bug report.
---
Your issue may already be reported!
Please search on the [Actix Web issue tracker](https://github.com/actix/actix-web/issues) before creating one.
## Expected Behavior
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->
## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
## Possible Solution
<!--- Not obligatory, but suggest a fix/reason for the bug, -->
<!--- or ideas how to implement the addition or change -->
## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
1.
2.
3.
4.
## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Rust Version (I.e, output of `rustc -V`):
* Actix Web Version:

.github/ISSUE_TEMPLATE/config.yml vendored Normal file

@ -0,0 +1,8 @@
blank_issues_enabled: true
contact_links:
- name: Gitter channel (actix-web)
url: https://gitter.im/actix/actix-web
about: Please ask and answer questions about the actix-web here.
- name: Gitter channel (actix)
url: https://gitter.im/actix/actix
about: Please ask and answer questions about the actix here.

.github/PULL_REQUEST_TEMPLATE.md vendored Normal file

@ -0,0 +1,27 @@
<!-- Thanks for considering contributing actix! -->
<!-- Please fill out the following to make our reviews easy. -->
## PR Type
<!-- What kind of change does this PR make? -->
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->
INSERT_PR_TYPE
## PR Checklist
Check your PR fulfills the following:
<!-- For draft PRs check the boxes as you complete them. -->
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] A changelog entry has been made for the appropriate packages.
- [ ] Format code with the latest stable rustfmt
## Overview
<!-- Describe the current and new behavior. -->
<!-- Emphasize any breaking changes. -->
<!-- If this PR fixes or closes an issue, reference it here. -->
<!-- Closes #000 -->

.github/workflows/bench.yml vendored Normal file

@ -0,0 +1,28 @@
name: Benchmark (Linux)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
jobs:
check_benchmark:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
profile: minimal
override: true
- name: Check benchmark
uses: actions-rs/cargo@v1
with:
command: bench
args: --bench=server -- --sample-size=15

.github/workflows/clippy-fmt.yml vendored Normal file

@ -0,0 +1,32 @@
on:
pull_request:
types: [opened, synchronize, reopened]
name: Clippy and rustfmt Check
jobs:
clippy_check:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions-rs/toolchain@v1
with:
toolchain: stable
components: rustfmt
override: true
- name: Check with rustfmt
uses: actions-rs/cargo@v1
with:
command: fmt
args: --all -- --check
- uses: actions-rs/toolchain@v1
with:
toolchain: nightly
components: clippy
override: true
- name: Check with Clippy
uses: actions-rs/clippy-check@v1
with:
token: ${{ secrets.GITHUB_TOKEN }}
args: --all-features --all --tests

.github/workflows/linux.yml vendored Normal file

@ -0,0 +1,69 @@
name: CI (Linux)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- 1.42.0 # MSRV
- stable
- nightly
name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
- name: tests (actix-http)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=actix-http --no-default-features --features=rustls -- --nocapture
- name: tests (awc)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=awc --no-default-features --features=rustls -- --nocapture
- name: Generate coverage file
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
run: |
cargo install cargo-tarpaulin --vers "^0.13"
cargo tarpaulin --out Xml
- name: Upload to Codecov
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
uses: codecov/codecov-action@v1
with:
file: cobertura.xml

.github/workflows/macos.yml vendored Normal file

@ -0,0 +1,44 @@
name: CI (macOS)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-apple-darwin
runs-on: macOS-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-apple-darwin
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
--skip=test_h2_content_length
--skip=test_reading_deflate_encoding_large_random_rustls

.github/workflows/upload-doc.yml vendored Normal file

@ -0,0 +1,37 @@
name: Upload documentation
on:
push:
branches:
- master
jobs:
build:
runs-on: ubuntu-latest
if: github.repository == 'actix/actix-web'
steps:
- uses: actions/checkout@v2
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: doc
args: --no-deps --workspace --all-features
- name: Tweak HTML
run: echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html
- name: Deploy to GitHub Pages
uses: JamesIves/github-pages-deploy-action@3.5.8
with:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
BRANCH: gh-pages
FOLDER: target/doc

.github/workflows/windows.yml vendored Normal file

@ -0,0 +1,64 @@
name: CI (Windows)
on:
pull_request:
types: [opened, synchronize, reopened]
push:
branches:
- master
env:
VCPKGRS_DYNAMIC: 1
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-pc-windows-msvc
runs-on: windows-latest
steps:
- uses: actions/checkout@v2
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-pc-windows-msvc
profile: minimal
override: true
- name: Install OpenSSL
run: |
vcpkg integrate install
vcpkg install openssl:x64-windows
Copy-Item C:\vcpkg\installed\x64-windows\bin\libcrypto-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libcrypto.dll
Copy-Item C:\vcpkg\installed\x64-windows\bin\libssl-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libssl.dll
Get-ChildItem C:\vcpkg\installed\x64-windows\bin
Get-ChildItem C:\vcpkg\installed\x64-windows\lib
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
--skip=test_h2_content_length
--skip=test_reading_deflate_encoding_large_random_rustls
--skip=test_params
--skip=test_simple
--skip=test_expect_continue
--skip=test_http10_keepalive
--skip=test_slow_request
--skip=test_connection_force_close
--skip=test_connection_server_close
--skip=test_connection_wait_queue_force_close

.gitignore vendored

@ -9,6 +9,7 @@ guide/build/
*.pid
*.sock
*~
.DS_Store
# These are backup files generated by rustfmt
**/*.rs.bk

.travis.yml

@ -1,55 +0,0 @@
language: rust
sudo: required
dist: trusty
cache:
cargo: true
apt: true
matrix:
include:
- rust: stable
- rust: beta
- rust: nightly-2019-03-02
allow_failures:
- rust: nightly-2019-03-02
env:
global:
# - RUSTFLAGS="-C link-dead-code"
- OPENSSL_VERSION=openssl-1.0.2
before_install:
- sudo add-apt-repository -y ppa:0k53d-karl-f830m/openssl
- sudo apt-get update -qq
- sudo apt-get install -y openssl libssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev
before_cache: |
if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-03-02" ]]; then
RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install cargo-tarpaulin
fi
# Add clippy
before_script:
- export PATH=$PATH:~/.cargo/bin
script:
- cargo clean
- cargo test --all-features --all -- --nocapture
# Upload docs
after_success:
- |
if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "stable" ]]; then
cargo doc --all-features --no-deps &&
echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html &&
git clone https://github.com/davisp/ghp-import.git &&
./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc &&
echo "Uploaded documentation"
fi
- |
if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-03-02" ]]; then
taskset -c 0 cargo tarpaulin --out Xml --all --all-features
bash <(curl -s https://codecov.io/bash)
echo "Uploaded code coverage"
fi

CHANGES.md

@ -1,7 +1,523 @@
# Changes
## [1.0.0-alpha.1] - 2019-03-x
## Unreleased - 2020-xx-xx
## 3.0.0 - 2020-09-11
* No significant changes from `3.0.0-beta.4`.
## 3.0.0-beta.4 - 2020-09-09
### Added
* `middleware::NormalizePath` now has configurable behaviour for either always having a trailing
slash, or as the new addition, always trimming trailing slashes. [#1639]
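A hedged sketch of the new configuration, assuming (as in the 3.x documentation) that the behaviour enum is exposed as `middleware::normalize::TrailingSlash`:

```rust
use actix_web::middleware::{normalize::TrailingSlash, NormalizePath};
use actix_web::{web, App, HttpServer};

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // Trim trailing slashes so "/items/" is routed like "/items".
            .wrap(NormalizePath::new(TrailingSlash::Trim))
            .route("/items", web::get().to(|| async { "items" }))
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
```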
### Changed
* Update actix-codec and actix-utils dependencies. [#1634]
* `FormConfig` and `JsonConfig` configurations are now also considered when set
using `App::data`. [#1641]
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`. [#1655]
* `HttpServer::maxconnrate` is renamed to the more expressive
`HttpServer::max_connection_rate`. [#1655]
[#1639]: https://github.com/actix/actix-web/pull/1639
[#1641]: https://github.com/actix/actix-web/pull/1641
[#1634]: https://github.com/actix/actix-web/pull/1634
[#1655]: https://github.com/actix/actix-web/pull/1655
## 3.0.0-beta.3 - 2020-08-17
### Changed
* Update `rustls` to 0.18
## 3.0.0-beta.2 - 2020-08-17
### Changed
* `PayloadConfig` is now also considered in `Bytes` and `String` extractors when set
using `App::data`. [#1610]
* `web::Path` now has a public representation: `web::Path(pub T)` that enables
destructuring. [#1594]
* `ServiceRequest::app_data` allows retrieval of non-Data data without splitting into parts to
access `HttpRequest` which already allows this. [#1618]
* Re-export all error types from `awc`. [#1621]
* MSRV is now 1.42.0.
### Fixed
* Memory leak of app data in pooled requests. [#1609]
[#1594]: https://github.com/actix/actix-web/pull/1594
[#1609]: https://github.com/actix/actix-web/pull/1609
[#1610]: https://github.com/actix/actix-web/pull/1610
[#1618]: https://github.com/actix/actix-web/pull/1618
[#1621]: https://github.com/actix/actix-web/pull/1621
## 3.0.0-beta.1 - 2020-07-13
### Added
* Re-export `actix_rt::main` as `actix_web::main`.
* `HttpRequest::match_pattern` and `ServiceRequest::match_pattern` for extracting the matched
resource pattern.
* `HttpRequest::match_name` and `ServiceRequest::match_name` for extracting matched resource name.
### Changed
* Fix actix_http::h1::dispatcher so it returns when HW_BUFFER_SIZE is reached. Should reduce peak memory consumption during large uploads. [#1550]
* Migrate cookie handling to `cookie` crate. Actix-web no longer requires `ring` dependency.
* MSRV is now 1.41.1
### Fixed
* `NormalizePath` improved consistency when path needs slashes added _and_ removed.
## 3.0.0-alpha.3 - 2020-05-21
### Added
* Add option to create `Data<T>` from `Arc<T>` [#1509]
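A brief sketch of the new conversion (the config type is illustrative):

```rust
use std::sync::Arc;
use actix_web::web::Data;

struct Config {
    name: &'static str,
}

fn main() {
    let shared = Arc::new(Config { name: "shared" });

    // Reuse the existing Arc instead of wrapping the value in a second one.
    let data: Data<Config> = Data::from(shared);
    assert_eq!(data.name, "shared");
}
```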
### Changed
* Resources and Scopes can now access non-overridden data types set on App (or containing scopes) when setting their own data. [#1486]
* Fix audit issue logging by default peer address [#1485]
* Bump minimum supported Rust version to 1.40
* Replace deprecated `net2` crate with `socket2`
[#1485]: https://github.com/actix/actix-web/pull/1485
[#1509]: https://github.com/actix/actix-web/pull/1509
## [3.0.0-alpha.2] - 2020-05-08
### Changed
* `{Resource,Scope}::default_service(f)` handlers now support app data extraction. [#1452]
* Implement `std::error::Error` for our custom errors [#1422]
* NormalizePath middleware now appends trailing / so that routes of form /example/ respond to /example requests. [#1433]
* Remove the `failure` feature and support.
[#1422]: https://github.com/actix/actix-web/pull/1422
[#1433]: https://github.com/actix/actix-web/pull/1433
[#1452]: https://github.com/actix/actix-web/pull/1452
[#1486]: https://github.com/actix/actix-web/pull/1486
## [3.0.0-alpha.1] - 2020-03-11
### Added
* Add helper function for creating routes with `TRACE` method guard `web::trace()`
* Add convenience functions `test::read_body_json()` and `test::TestRequest::send_request()` for testing.
### Changed
* Use `sha-1` crate instead of unmaintained `sha1` crate
* Skip empty chunks when returning response from a `Stream` [#1308]
* Update the `time` dependency to 0.2.7
* Update `actix-tls` dependency to 2.0.0-alpha.1
* Update `rustls` dependency to 0.17
[#1308]: https://github.com/actix/actix-web/pull/1308
## [2.0.0] - 2019-12-25
### Changed
* Rename `HttpServer::start()` to `HttpServer::run()`
* Allow to gracefully stop test server via `TestServer::stop()`
* Allow to specify multi-patterns for resources
## [2.0.0-rc] - 2019-12-20
### Changed
* Move `BodyEncoding` to `dev` module #1220
* Allow to set `peer_addr` for TestRequest #1074
* Make web::Data deref to Arc<T> #1214
* Rename `App::register_data()` to `App::app_data()`
* `HttpRequest::app_data<T>()` returns `Option<&T>` instead of `Option<&Data<T>>`
### Fixed
* Fix `AppConfig::secure()` is always false. #1202
## [2.0.0-alpha.6] - 2019-12-15
### Fixed
* Fixed compilation with default features off
## [2.0.0-alpha.5] - 2019-12-13
### Added
* Add test server, `test::start()` and `test::start_with()`
## [2.0.0-alpha.4] - 2019-12-08
### Deleted
* Delete `HttpServer::run()`, it is not useful with async/await
## [2.0.0-alpha.3] - 2019-12-07
### Changed
* Migrate to tokio 0.2
## [2.0.0-alpha.1] - 2019-11-22
### Changed
* Migrated to `std::future`
* Remove implementation of `Responder` for `()`. (#1167)
## [1.0.9] - 2019-11-14
### Added
* Add `Payload::into_inner` method and make stored `def::Payload` public. (#1110)
### Changed
* Support `Host` guards when the `Host` header is unset (e.g. HTTP/2 requests) (#1129)
## [1.0.8] - 2019-09-25
### Added
* Add `Scope::register_data` and `Resource::register_data` methods, parallel to
`App::register_data`.
* Add `middleware::Condition` that conditionally enables another middleware
* Allow to re-construct `ServiceRequest` from `HttpRequest` and `Payload`
* Add `HttpServer::listen_uds` for ability to listen on UDS FD rather than path,
which is useful for example with systemd.
### Changed
* Make `UrlEncodedError::Overflow` more informative
* Use actix-testing for testing utils
## [1.0.7] - 2019-08-29
### Fixed
* Request Extensions leak #1062
## [1.0.6] - 2019-08-28
### Added
* Re-implement Host predicate (#989)
* `Form` implements `Responder`, returning an `application/x-www-form-urlencoded` response
* Add `into_inner` to `Data`
* Add `test::TestRequest::set_form()` convenience method to automatically serialize data and set
the header in test requests.
### Changed
* `Query` payload made `pub`. Allows user to pattern-match the payload.
* Enable `rust-tls` feature for client #1045
* Update serde_urlencoded to 0.6.1
* Update url to 2.1
## [1.0.5] - 2019-07-18
### Added
* Unix domain sockets (HttpServer::bind_uds) #92
* Actix now logs errors resulting in "internal server error" responses always, with the `error`
logging level
### Fixed
* Restored logging of errors through the `Logger` middleware
## [1.0.4] - 2019-07-17
### Added
* Add `Responder` impl for `(T, StatusCode) where T: Responder`
* Allow to access app's resource map via
`ServiceRequest::resource_map()` and `HttpRequest::resource_map()` methods.
### Changed
* Upgrade `rand` dependency version to 0.7
## [1.0.3] - 2019-06-28
### Added
* Support asynchronous data factories #850
### Changed
* Use `encoding_rs` crate instead of unmaintained `encoding` crate
## [1.0.2] - 2019-06-17
### Changed
* Move cors middleware to `actix-cors` crate.
* Move identity middleware to `actix-identity` crate.
## [1.0.1] - 2019-06-17
### Added
* Add support for PathConfig #903
* Add `middleware::identity::RequestIdentity` trait to `get_identity` from `HttpMessage`.
### Changed
* Move cors middleware to `actix-cors` crate.
* Move identity middleware to `actix-identity` crate.
* Disable default feature `secure-cookies`.
* Allow to test an app that uses async actors #897
* Re-apply patch from #637 #894
### Fixed
* HttpRequest::url_for is broken with nested scopes #915
## [1.0.0] - 2019-06-05
### Added
* Add `Scope::configure()` method.
* Add `ServiceRequest::set_payload()` method.
* Add `test::TestRequest::set_json()` convenience method to automatically
serialize data and set header in test requests.
* Add macros for head, options, trace, connect and patch http methods
### Changed
* Drop an unnecessary `Option<_>` indirection around `ServerBuilder` from `HttpServer`. #863
### Fixed
* Fix Logger request time format, and use rfc3339. #867
* Clear http requests pool on app service drop #860
## [1.0.0-rc] - 2019-05-18
### Add
* Add `Query<T>::from_query()` to extract parameters from a query string. #846
* `QueryConfig`, similar to `JsonConfig` for customizing error handling of query extractors.
### Changed
* `JsonConfig` is now `Send + Sync`, this implies that `error_handler` must be `Send + Sync` too.
### Fixed
* Codegen with parameters in the path only resolves the first registered endpoint #841
## [1.0.0-beta.4] - 2019-05-12
### Add
* Allow to set/override app data on scope level
### Changed
* `App::configure` take an `FnOnce` instead of `Fn`
* Upgrade actix-net crates
## [1.0.0-beta.3] - 2019-05-04
### Added
* Add helper function for executing futures `test::block_fn()`
### Changed
* Extractor configuration could be registered with `App::data()`
or with `Resource::data()` #775
* Route data is unified with app data, `Route::data()` moved to resource
level to `Resource::data()`
* CORS handling without headers #702
* Allow to construct `Data` instances to avoid double `Arc` for `Send + Sync` types.
### Fixed
* Fix `NormalizePath` middleware impl #806
### Deleted
* `App::data_factory()` is deleted.
## [1.0.0-beta.2] - 2019-04-24
### Added
* Add raw services support via `web::service()`
* Add helper functions for reading response body `test::read_body()`
* Add support for `remainder match` (i.e "/path/{tail}*")
* Extend `Responder` trait, allow to override status code and headers.
* Store visit and login timestamp in the identity cookie #502
### Changed
* `.to_async()` handler can return `Responder` type #792
### Fixed
* Fix async web::Data factory handling
## [1.0.0-beta.1] - 2019-04-20
### Added
* Add helper functions for reading test response body,
`test::read_response()` and `test::read_response_json()`
* Add `.peer_addr()` #744
* Add `NormalizePath` middleware
### Changed
* Rename `RouterConfig` to `ServiceConfig`
* Rename `test::call_success` to `test::call_service`
* Removed `ServiceRequest::from_parts()` as it is unsafe to create from parts.
* `CookieIdentityPolicy::max_age()` accepts value in seconds
### Fixed
* Fixed `TestRequest::app_data()`
## [1.0.0-alpha.6] - 2019-04-14
### Changed
* Allow to use any service as default service.
* Remove generic type for request payload, always use default.
* Removed `Decompress` middleware. Bytes, String, Json, Form extractors
automatically decompress payload.
* Make extractor config type explicit. Add `FromRequest::Config` associated type.
## [1.0.0-alpha.5] - 2019-04-12
### Added
* Added async io `TestBuffer` for testing.
### Deleted
* Removed native-tls support
## [1.0.0-alpha.4] - 2019-04-08
### Added
* `App::configure()` allow to offload app configuration to different methods
* Added `URLPath` option for logger
* Added `ServiceRequest::app_data()`, returns `Data<T>`
* Added `ServiceFromRequest::app_data()`, returns `Data<T>`
### Changed
* `FromRequest` trait refactoring
* Move multipart support to actix-multipart crate
### Fixed
* Fix body propagation in Response::from_error. #760
## [1.0.0-alpha.3] - 2019-04-02
### Changed
* Renamed `TestRequest::to_service()` to `TestRequest::to_srv_request()`
* Renamed `TestRequest::to_response()` to `TestRequest::to_srv_response()`
* Removed `Deref` impls
### Removed
* Removed unused `actix_web::web::md()`
## [1.0.0-alpha.2] - 2019-03-29
### Added
* rustls support
### Changed
* use forked cookie
* multipart::Field renamed to MultipartField
## [1.0.0-alpha.1] - 2019-03-28
### Changed
* Complete architecture re-design.
* Return 405 response if no matching route found within resource #538

View File

@ -1,23 +1,24 @@
[package]
name = "actix-web"
version = "1.0.0-alpha.1"
version = "3.0.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust."
readme = "README.md"
keywords = ["http", "web", "framework", "async", "futures"]
keywords = ["actix", "http", "web", "framework", "async"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-web/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
license = "MIT OR Apache-2.0"
edition = "2018"
[package.metadata.docs.rs]
features = ["openssl", "rustls", "compress", "secure-cookies"]
[badges]
travis-ci = { repository = "actix/actix-web", branch = "master" }
appveyor = { repository = "fafhrd91/actix-web-hdy9d" }
codecov = { repository = "actix/actix-web", branch = "master", service = "github" }
[lib]
@ -30,90 +31,108 @@ members = [
"awc",
"actix-http",
"actix-files",
"actix-session",
"actix-multipart",
"actix-web-actors",
"actix-web-codegen",
"test-server",
]
[package.metadata.docs.rs]
features = ["ssl", "tls", "rust-tls", "brotli", "flate2-zlib", "cookies", "client"]
[features]
default = ["brotli", "flate2-zlib", "cookies", "client"]
default = ["compress"]
# http client
client = ["awc"]
# content-encoding support
compress = ["actix-http/compress", "awc/compress"]
# brotli encoding, requires c compiler
brotli = ["actix-http/brotli2"]
# miniz-sys backend for flate2 crate
flate2-zlib = ["actix-http/flate2-zlib"]
# rust backend for flate2 crate
flate2-rust = ["actix-http/flate2-rust"]
# sessions feature, session require "ring" crate and c compiler
cookies = ["cookie", "actix-http/cookies"]
# tls
tls = ["native-tls", "actix-server/ssl"]
# sessions feature
secure-cookies = ["actix-http/secure-cookies"]
# openssl
ssl = ["openssl", "actix-server/ssl"]
openssl = ["actix-tls/openssl", "awc/openssl", "open-ssl"]
# rustls
# rust-tls = ["rustls", "actix-server/rustls"]
rustls = ["actix-tls/rustls", "awc/rustls", "rust-tls"]
[[example]]
name = "basic"
required-features = ["compress"]
[[example]]
name = "uds"
required-features = ["compress"]
[[test]]
name = "test_server"
required-features = ["compress"]
[dependencies]
actix-codec = "0.1.1"
actix-service = "0.3.4"
actix-utils = "0.3.4"
actix-router = "0.1.0"
actix-rt = "0.2.2"
actix-web-codegen = { path="actix-web-codegen" }
actix-http = { path = "actix-http", features=["fail"] }
actix-server = "0.4.1"
actix-server-config = "0.1.0"
actix-threadpool = "0.1.0"
awc = { path = "awc", optional = true }
actix-codec = "0.3.0"
actix-service = "1.0.6"
actix-utils = "2.0.0"
actix-router = "0.2.4"
actix-rt = "1.1.1"
actix-server = "1.0.0"
actix-testing = "1.0.0"
actix-macros = "0.1.0"
actix-threadpool = "0.3.1"
actix-tls = "2.0.0"
bytes = "0.4"
derive_more = "0.14"
encoding = "0.2"
futures = "0.1"
hashbrown = "0.1.8"
httparse = "1.3"
actix-web-codegen = "0.3.0"
actix-http = "2.0.0"
awc = { version = "2.0.0", default-features = false }
bytes = "0.5.3"
derive_more = "0.99.2"
encoding_rs = "0.8"
futures-channel = { version = "0.3.5", default-features = false }
futures-core = { version = "0.3.5", default-features = false }
futures-util = { version = "0.3.5", default-features = false }
fxhash = "0.2.1"
log = "0.4"
mime = "0.3"
net2 = "0.2.33"
parking_lot = "0.7"
regex = "1.0"
serde = { version = "1.0", features=["derive"] }
socket2 = "0.3"
pin-project = "0.4.17"
regex = "1.3"
serde = { version = "1.0", features = ["derive"] }
serde_json = "1.0"
serde_urlencoded = "^0.5.3"
time = "0.1"
url = { version="1.7", features=["query_encoding"] }
# cookies support
cookie = { version="0.11", features=["secure", "percent-encode"], optional = true }
# ssl support
native-tls = { version="0.2", optional = true }
openssl = { version="0.10", optional = true }
# rustls = { version = "^0.15", optional = true }
serde_urlencoded = "0.6.1"
time = { version = "0.2.7", default-features = false, features = ["std"] }
url = "2.1"
open-ssl = { package = "openssl", version = "0.10", optional = true }
rust-tls = { package = "rustls", version = "0.18.0", optional = true }
tinyvec = { version = "0.3", features = ["alloc"] }
[dev-dependencies]
actix-http = { path = "actix-http", features=["ssl", "brotli", "flate2-zlib"] }
actix-http-test = { path = "test-server", features=["ssl"] }
rand = "0.6"
env_logger = "0.6"
actix = "0.10.0"
actix-http = { version = "2.0.0-beta.4", features = ["actors"] }
rand = "0.7"
env_logger = "0.7"
serde_derive = "1.0"
tokio-timer = "0.2.8"
brotli2 = { version="^0.3.2" }
flate2 = { version="^1.0.2" }
brotli2 = "0.3.2"
flate2 = "1.0.13"
criterion = "0.3"
[profile.release]
lto = true
opt-level = 3
codegen-units = 1
[patch.crates-io]
actix-web = { path = "." }
actix-http = { path = "actix-http" }
actix-http-test = { path = "test-server" }
actix-web-codegen = { path = "actix-web-codegen" }
actix-files = { path = "actix-files" }
actix-multipart = { path = "actix-multipart" }
awc = { path = "awc" }
[[example]]
name = "client"
required-features = ["rustls"]
[[bench]]
name = "server"
harness = false
[[bench]]
name = "service"
harness = false

View File

@ -186,7 +186,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-NOW Nikolay Kim
Copyright 2017-NOW Actix Team
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

View File

@ -1,4 +1,4 @@
Copyright (c) 2017 Nikolay Kim
Copyright (c) 2017 Actix Team
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated

View File

@ -1,3 +1,438 @@
## Unreleased
## 3.0.0
* Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now
result in `SameSite=None` being sent with the response Set-Cookie header.
To create a cookie without a SameSite attribute, remove any calls setting same_site.
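For illustration, a minimal sketch of the new behaviour (cookie names and values are arbitrary, and the `actix_web::cookie` re-export is assumed):
```rust
use actix_web::cookie::{Cookie, SameSite};
use actix_web::HttpResponse;

fn cookie_response() -> HttpResponse {
    // explicitly opting in now emits `SameSite=None` on the Set-Cookie header
    let cross_site = Cookie::build("session", "abc123")
        .same_site(SameSite::None)
        .secure(true)
        .finish();

    // omitting `same_site` yields a cookie without any SameSite attribute
    let plain = Cookie::build("theme", "dark").finish();

    HttpResponse::Ok().cookie(cross_site).cookie(plain).finish()
}
```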
* Support for Actors messages was moved to the `actix-http` crate and is enabled
with the `actors` feature.
* The `content_length` function was removed from actix-http.
You can set `Content-Length` by setting the response body as usual or by calling the `no_chunking` function.
* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
`u64` instead of a `usize`.
* Code that was using `path.<index>` to access a `web::Path<(A, B, C)>`s elements now needs to use
destructuring or `.into_inner()`. For example:
```rust
// Previously:
async fn some_route(path: web::Path<(String, String)>) -> String {
format!("Hello, {} {}", path.0, path.1)
}
// Now (this also worked before):
async fn some_route(path: web::Path<(String, String)>) -> String {
let (first_name, last_name) = path.into_inner();
format!("Hello, {} {}", first_name, last_name)
}
// Or (this wasn't previously supported):
async fn some_route(web::Path((first_name, last_name)): web::Path<(String, String)>) -> String {
format!("Hello, {} {}", first_name, last_name)
}
```
* `middleware::NormalizePath` can now also be configured to trim trailing slashes instead of always keeping one.
It will need `middleware::normalize::TrailingSlash` when being constructed with `NormalizePath::new(...)`,
or for an easier migration you can replace `wrap(middleware::NormalizePath)` with `wrap(middleware::NormalizePath::default())`.
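For example, a minimal sketch of the trimming behaviour, assuming the `TrailingSlash::Trim` variant; the route is illustrative:
```rust
use actix_web::middleware::{normalize::TrailingSlash, NormalizePath};
use actix_web::{web, App, HttpResponse, HttpServer};

async fn index() -> HttpResponse {
    HttpResponse::Ok().finish()
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // requests for "/example/" are now routed to "/example"
            .wrap(NormalizePath::new(TrailingSlash::Trim))
            .route("/example", web::get().to(index))
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
```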
* `HttpServer::maxconn` is renamed to the more expressive `HttpServer::max_connections`.
* `HttpServer::maxconnrate` is renamed to the more expressive `HttpServer::max_connection_rate`.
## 2.0.0
* `HttpServer::start()` renamed to `HttpServer::run()`. It is also possible to
`.await` the result of `run()`, in which case it waits for the server to exit.
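A minimal sketch of the 2.0 startup flow (the bind address is illustrative):
```rust
use actix_web::{App, HttpServer};

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| App::new())
        .bind("127.0.0.1:8080")?
        // `run()` returns a server future; awaiting it waits for server exit
        .run()
        .await
}
```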
* `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`.
Stored data is available via `HttpRequest::app_data()` method at runtime.
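For example, a minimal sketch; `AppConfig` and its field are illustrative placeholders:
```rust
use actix_web::{web, App, HttpRequest, HttpResponse, HttpServer};

struct AppConfig {
    greeting: &'static str, // illustrative application state
}

async fn index(req: HttpRequest) -> HttpResponse {
    // data stored with `App::app_data` is returned as `Option<&T>`
    let cfg = req.app_data::<AppConfig>().expect("AppConfig missing");
    HttpResponse::Ok().body(cfg.greeting)
}

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            .app_data(AppConfig { greeting: "hello" })
            .route("/", web::get().to(index))
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
```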
* Extractor configuration must be registered with `App::app_data()` instead of `App::data()`
* Sync handlers have been removed; the `.to_async()` method has been renamed to `.to()`.
Replace `fn` with `async fn` to convert a sync handler to an async one.
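For example (handler name and route are illustrative):
```rust
use actix_web::{web, HttpResponse};

// 1.x: `fn status() -> HttpResponse { ... }` registered with `.to()` / `.to_async()`
// 2.0: the same handler becomes an `async fn`, still registered with `.to()`:
async fn status() -> HttpResponse {
    HttpResponse::Ok().body("ok")
}

fn configure(cfg: &mut web::ServiceConfig) {
    cfg.route("/status", web::get().to(status));
}
```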
* `actix_http_test::TestServer` moved to the `actix_web::test` module. To start a
test server, use the `test::start()` or `test::start_with()` methods.
* The `ResponseError` trait has been refactored. `ResponseError::error_response()` now renders
the HTTP response.
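A minimal sketch of a custom error under the refactored trait; the error type and message are illustrative:
```rust
use actix_web::{error::ResponseError, http::StatusCode, HttpResponse};
use derive_more::Display;

#[derive(Debug, Display)]
#[display(fmt = "service temporarily unavailable")]
struct MyError;

impl ResponseError for MyError {
    fn status_code(&self) -> StatusCode {
        StatusCode::SERVICE_UNAVAILABLE
    }

    // `error_response()` now renders the complete HTTP response;
    // the old `render_response()` method is gone
    fn error_response(&self) -> HttpResponse {
        HttpResponse::build(self.status_code()).body(self.to_string())
    }
}
```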
* Feature `rust-tls` renamed to `rustls`
instead of
```toml
actix-web = { version = "2.0.0", features = ["rust-tls"] }
```
use
```toml
actix-web = { version = "2.0.0", features = ["rustls"] }
```
* Feature `ssl` renamed to `openssl`
instead of
```toml
actix-web = { version = "2.0.0", features = ["ssl"] }
```
use
```toml
actix-web = { version = "2.0.0", features = ["openssl"] }
```
* `Cors` builder now requires that you call `.finish()` to construct the middleware
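For example, a sketch assuming the actix-cors release that pairs with actix-web 2.0 (the allowed origin is illustrative):
```rust
use actix_cors::Cors;
use actix_web::{App, HttpServer};

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().wrap(
            Cors::new()
                .allowed_origin("https://example.com")
                // `.finish()` is now required to build the middleware
                .finish(),
        )
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
```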
## 1.0.1
* Cors middleware has been moved to `actix-cors` crate
instead of
```rust
use actix_web::middleware::cors::Cors;
```
use
```rust
use actix_cors::Cors;
```
* Identity middleware has been moved to `actix-identity` crate
instead of
```rust
use actix_web::middleware::identity::{Identity, CookieIdentityPolicy, IdentityService};
```
use
```rust
use actix_identity::{Identity, CookieIdentityPolicy, IdentityService};
```
## 1.0.0
* Extractor configuration. In version 1.0 this is handled with the new `Data` mechanism for both setting and retrieving the configuration
instead of
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Config = ExtractorConfig;
type Result = Result<YourExtractor, Error>;
fn from_request(req: &HttpRequest, cfg: &Self::Config) -> Self::Result {
println!("use the config: {:?}", cfg.config);
...
}
}
App::new().resource("/route_with_config", |r| {
r.post().with_config(handler_fn, |cfg| {
cfg.0.config = "test".to_string();
})
})
```
Use the `HttpRequest` to get the configuration like any other `Data` with `req.app_data::<C>()`, and set it with the `data()` method on the resource:
```rust
#[derive(Default)]
struct ExtractorConfig {
config: String,
}
impl FromRequest for YourExtractor {
type Error = Error;
type Future = Result<Self, Self::Error>;
type Config = ExtractorConfig;
fn from_request(req: &HttpRequest, payload: &mut Payload) -> Self::Future {
let cfg = req.app_data::<ExtractorConfig>();
println!("config data?: {:?}", cfg.unwrap().role);
...
}
}
App::new().service(
resource("/route_with_config")
.data(ExtractorConfig {
config: "test".to_string(),
})
.route(post().to(handler_fn)),
)
```
* Resource registration. 1.0 version uses generalized resource
registration via `.service()` method.
instead of
```rust
App.new().resource("/welcome", |r| r.f(welcome))
```
use App's or Scope's `.service()` method. The `.service()` method accepts an
object that implements the `HttpServiceFactory` trait. By default,
actix-web provides `Resource` and `Scope` services.
```rust
App::new().service(
    web::resource("/welcome")
        .route(web::get().to(welcome))
        .route(web::post().to(post_handler)),
)
```
* Scope registration.
instead of
```rust
let app = App::new().scope("/{project_id}", |scope| {
scope
.resource("/path1", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path2", |r| r.f(|_| HttpResponse::Ok()))
.resource("/path3", |r| r.f(|_| HttpResponse::MethodNotAllowed()))
});
```
use `.service()` for registration and `web::scope()` as scope object factory.
```rust
let app = App::new().service(
web::scope("/{project_id}")
.service(web::resource("/path1").to(|| HttpResponse::Ok()))
.service(web::resource("/path2").to(|| HttpResponse::Ok()))
.service(web::resource("/path3").to(|| HttpResponse::MethodNotAllowed()))
);
```
* `.with()`, `.with_async()` registration methods have been renamed to `.to()` and `.to_async()`.
instead of
```rust
App.new().resource("/welcome", |r| r.with(welcome))
```
use `.to()` or `.to_async()` methods
```rust
App::new().service(web::resource("/welcome").to(welcome))
```
* Passing arguments to handler with extractors, multiple arguments are allowed
instead of
```rust
fn welcome((body, req): (Bytes, HttpRequest)) -> ... {
...
}
```
use multiple arguments
```rust
fn welcome(body: Bytes, req: HttpRequest) -> ... {
...
}
```
* `.f()`, `.a()` and `.h()` handler registration methods have been removed.
Use `.to()` for handlers and `.to_async()` for async handlers. Handler function
must use extractors.
instead of
```rust
App::new().resource("/welcome", |r| r.f(welcome))
```
use App's `to()` or `to_async()` methods
```rust
App::new().service(web::resource("/welcome").to(welcome))
```
* `HttpRequest` does not provide access to request's payload stream.
instead of
```rust
fn index(req: &HttpRequest) -> Box<Future<Item=HttpResponse, Error=Error>> {
req
.payload()
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
.responder()
}
```
use `Payload` extractor
```rust
fn index(stream: web::Payload) -> impl Future<Item=HttpResponse, Error=Error> {
stream
.from_err()
.fold((), |_, chunk| {
...
})
.map(|_| HttpResponse::Ok().finish())
}
```
* `State` is now `Data`. You register Data during the App initialization process
and then access it from handlers either using a Data extractor or using
HttpRequest's api.
instead of
```rust
App::with_state(T)
```
use App's `data` method
```rust
App::new()
    .data(T)
```
and either use the Data extractor within your handler
```rust
use actix_web::web::Data;
fn endpoint_handler(data: Data<T>) {
...
}
```
.. or access your Data element from the HttpRequest
```rust
fn endpoint_handler(req: HttpRequest) {
let data: Option<Data<T>> = req.app_data::<T>();
}
```
* AsyncResponder is removed, use `.to_async()` registration method and `impl Future<>` as result type.
instead of
```rust
use actix_web::AsyncResponder;
fn endpoint_handler(...) -> impl Future<Item=HttpResponse, Error=Error>{
...
.responder()
}
```
.. simply omit AsyncResponder and the corresponding responder() finish method
* Middleware
instead of
```rust
let app = App::new()
.middleware(middleware::Logger::default())
```
use `.wrap()` method
```rust
let app = App::new()
.wrap(middleware::Logger::default())
.route("/index.html", web::get().to(index));
```
* `HttpRequest::body()`, `HttpRequest::urlencoded()`, `HttpRequest::json()`, `HttpRequest::multipart()`
methods have been removed. Use the `Bytes`, `String`, `Form`, `Json`, and `Multipart` extractors instead.
instead of
```rust
fn index(req: &HttpRequest) -> Responder {
req.body()
.and_then(|body| {
...
})
}
```
use
```rust
fn index(body: Bytes) -> Responder {
...
}
```
* `actix_web::server` module has been removed. To start http server use `actix_web::HttpServer` type
* StaticFiles and NamedFile have been moved to a separate crate.
instead of `use actix_web::fs::StaticFiles`
use `use actix_files::Files`
instead of `use actix_web::fs::NamedFile`
use `use actix_files::NamedFile`
* Multipart has been moved to a separate crate.
instead of `use actix_web::multipart::Multipart`
use `use actix_multipart::Multipart`
* Response compression is not enabled by default.
To enable, use `Compress` middleware, `App::new().wrap(Compress::default())`.
* Session middleware moved to actix-session crate
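For example, a sketch assuming actix-web 1.x and a compatible actix-session release (the signing key is illustrative):
```rust
use actix_session::CookieSession;
use actix_web::{App, HttpServer};

fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().wrap(
            // 32-byte signing key shown only for illustration
            CookieSession::signed(&[0; 32]).secure(false),
        )
    })
    .bind("127.0.0.1:8080")?
    .run()
}
```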
* Actors support has been moved to the `actix-web-actors` crate
* Custom Error
Instead of the `error_response` method alone, `ResponseError` now provides two methods: `error_response` and `render_response`. `error_response` creates the error response and `render_response` returns the error response to the caller.
The simplest migration from 0.7 to 1.0 is to add the following method to your custom `ResponseError` implementation:
```rust
fn render_response(&self) -> HttpResponse {
self.error_response()
}
```
## 0.7.15
* The `' '` character is not percent decoded anymore before matching routes. If you need to use it in
@ -38,9 +473,9 @@
* `HttpRequest` does not implement `Stream` anymore. If you need to read request payload
use `HttpMessage::payload()` method.
instead of
```rust
fn index(req: HttpRequest) -> impl Responder {
req
@ -66,8 +501,8 @@
trait uses `&HttpRequest` instead of `&mut HttpRequest`.
* Removed `Route::with2()` and `Route::with3()` use tuple of extractors instead.
instead of
```rust
fn index(query: Query<..>, info: Json<MyStruct>) -> impl Responder {}
@ -83,7 +518,7 @@
* `Handler::handle()` accepts reference to `HttpRequest<_>` instead of value
* Removed deprecated `HttpServer::threads()`, use
[HttpServer::workers()](https://actix.rs/actix-web/actix_web/server/struct.HttpServer.html#method.workers) instead.
* Renamed `client::ClientConnectorError::Connector` to
@ -92,7 +527,7 @@
* `Route::with()` does not return `ExtractorConfig`, to configure
extractor use `Route::with_config()`
instead of
```rust
fn main() {
@ -103,11 +538,11 @@
});
}
```
use
```rust
fn main() {
let app = App::new().resource("/index.html", |r| {
r.method(http::Method::GET)
@ -137,12 +572,12 @@
* `HttpRequest::extensions()` returns read only reference to the request's Extension
`HttpRequest::extensions_mut()` returns mutable reference.
* Instead of
`use actix_web::middleware::{
CookieSessionBackend, CookieSessionError, RequestSession,
Session, SessionBackend, SessionImpl, SessionStorage};`
use `actix_web::middleware::session`
`use actix_web::middleware::session{CookieSessionBackend, CookieSessionError,

README.md
View File

@ -1,81 +1,109 @@
# Actix web [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![Build status](https://ci.appveyor.com/api/projects/status/kkdb4yce7qhm5w85/branch/master?svg=true)](https://ci.appveyor.com/project/fafhrd91/actix-web-hdy9d/branch/master) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-web)](https://crates.io/crates/actix-web) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
<div align="center">
<h1>Actix web</h1>
<p>
<strong>Actix web is a powerful, pragmatic, and extremely fast web framework for Rust</strong>
</p>
<p>
Actix web is a simple, pragmatic and extremely fast web framework for Rust.
[![crates.io](https://meritbadge.herokuapp.com/actix-web)](https://crates.io/crates/actix-web)
[![Documentation](https://docs.rs/actix-web/badge.svg)](https://docs.rs/actix-web)
[![Version](https://img.shields.io/badge/rustc-1.42+-ab6000.svg)](https://blog.rust-lang.org/2020/03/12/Rust-1.42.html)
![License](https://img.shields.io/crates/l/actix-web.svg)
<br />
[![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web)
[![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web)
[![Download](https://img.shields.io/crates/d/actix-web.svg)](https://crates.io/crates/actix-web)
[![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
* Supported *HTTP/1.x* and *HTTP/2.0* protocols
</p>
</div>
## Features
* Supports *HTTP/1.x* and *HTTP/2*
* Streaming and pipelining
* Keep-alive and slow requests handling
* Client/server [WebSockets](https://actix.rs/docs/websockets/) support
* Transparent content compression/decompression (br, gzip, deflate)
* Configurable [request routing](https://actix.rs/docs/url-dispatch/)
* Powerful [request routing](https://actix.rs/docs/url-dispatch/)
* Multipart streams
* Static assets
* SSL support with OpenSSL or `native-tls`
* Middlewares ([Logger, Session, CORS, CSRF, etc](https://actix.rs/docs/middleware/))
* Includes an asynchronous [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
* SSL support using OpenSSL or Rustls
* Middlewares ([Logger, Session, CORS, etc](https://actix.rs/docs/middleware/))
* Includes an async [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
* Supports [Actix actor framework](https://github.com/actix/actix)
* Experimental [Async/Await](https://github.com/mehcode/actix-web-async-await) support.
* Runs on stable Rust 1.42+
## Documentation & community resources
## Documentation
* [User Guide](https://actix.rs/docs/)
* [API Documentation (Development)](https://actix.rs/actix-web/actix_web/)
* [API Documentation (Releases)](https://docs.rs/actix-web/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-web](https://crates.io/crates/actix-web)
* Minimum supported Rust version: 1.32 or later
* [Website & User Guide](https://actix.rs)
* [Examples Repository](https://github.com/actix/examples)
* [API Documentation](https://docs.rs/actix-web)
* [API Documentation (master branch)](https://actix.rs/actix-web/actix_web)
## Example
```rust
use actix_web::{web, App, HttpServer, Responder};

fn index(info: web::Path<(u32, String)>) -> impl Responder {
    format!("Hello {}! id:{}", info.1, info.0)
}

fn main() -> std::io::Result<()> {
    HttpServer::new(
        || App::new()
            .service(web::resource("/{id}/{name}/index.html")
                .route(web::get().to(index)))
        .bind("127.0.0.1:8080")?
        .run();
}
```
Dependencies:
```toml
[dependencies]
actix-web = "3"
```
Code:
```rust
use actix_web::{get, web, App, HttpServer, Responder};

#[get("/{id}/{name}/index.html")]
async fn index(web::Path((id, name)): web::Path<(u32, String)>) -> impl Responder {
    format!("Hello {}! id:{}", name, id)
}

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| App::new().service(index))
        .bind("127.0.0.1:8080")?
        .run()
        .await
}
```
### More examples
* [Basics](https://github.com/actix/examples/tree/master/basics/)
* [Stateful](https://github.com/actix/examples/tree/master/state/)
* [Multipart streams](https://github.com/actix/examples/tree/master/multipart/)
* [Simple websocket](https://github.com/actix/examples/tree/master/websocket/)
* [Tera](https://github.com/actix/examples/tree/master/template_tera/) /
[Askama](https://github.com/actix/examples/tree/master/template_askama/) templates
* [Diesel integration](https://github.com/actix/examples/tree/master/diesel/)
* [r2d2](https://github.com/actix/examples/tree/master/r2d2/)
* [SSL / HTTP/2.0](https://github.com/actix/examples/tree/master/tls/)
* [Tcp/Websocket chat](https://github.com/actix/examples/tree/master/websocket-chat/)
* [Json](https://github.com/actix/examples/tree/master/json/)
* [Basic Setup](https://github.com/actix/examples/tree/master/basics/)
* [Application State](https://github.com/actix/examples/tree/master/state/)
* [JSON Handling](https://github.com/actix/examples/tree/master/json/)
* [Multipart Streams](https://github.com/actix/examples/tree/master/multipart/)
* [Diesel Integration](https://github.com/actix/examples/tree/master/diesel/)
* [r2d2 Integration](https://github.com/actix/examples/tree/master/r2d2/)
* [Simple WebSocket](https://github.com/actix/examples/tree/master/websocket/)
* [Tera Templates](https://github.com/actix/examples/tree/master/template_tera/)
* [Askama Templates](https://github.com/actix/examples/tree/master/template_askama/)
* [HTTPS using Rustls](https://github.com/actix/examples/tree/master/rustls/)
* [HTTPS using OpenSSL](https://github.com/actix/examples/tree/master/openssl/)
* [WebSocket Chat](https://github.com/actix/examples/tree/master/websocket-chat/)
You may consider checking out
[this directory](https://github.com/actix/examples/tree/master/) for more examples.
## Benchmarks
* [TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r16&hw=ph&test=plaintext)
One of the fastest web frameworks available according to the
[TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r19).
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or
[http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or
[http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option.
## Code of Conduct
Contribution to the actix-web crate is organized under the terms of the
Contributor Covenant, the maintainer of actix-web, @fafhrd91, promises to
intervene to uphold that code of conduct.
Contribution to the actix-web crate is organized under the terms of the Contributor Covenant. The
maintainers of Actix web promise to intervene to uphold that code of conduct.

View File

@ -1,5 +1,90 @@
# Changes
## [0.1.0] - 2018-10-x
## [Unreleased] - 2020-xx-xx
## [0.3.0-beta.1] - 2020-07-15
* Update `v_htmlescape` to 0.10
* Update `actix-web` and `actix-http` dependencies to beta.1
## [0.3.0-alpha.1] - 2020-05-23
* Update `actix-web` and `actix-http` dependencies to alpha
* Fix some typos in the docs
* Bump minimum supported Rust version to 1.40
* Support sending Content-Length when Content-Range is specified [#1384]
[#1384]: https://github.com/actix/actix-web/pull/1384
## [0.2.1] - 2019-12-22
* Use the same format for file URLs regardless of platforms
## [0.2.0] - 2019-12-20
* Fix BodyEncoding trait import #1220
## [0.2.0-alpha.1] - 2019-12-07
* Migrate to `std::future`
## [0.1.7] - 2019-11-06
* Add an additional `filename*` param in the `Content-Disposition` header of `actix_files::NamedFile` to be more compatible. (#1151)
## [0.1.6] - 2019-10-14
* Add option to redirect to a slash-ended path `Files` #1132
## [0.1.5] - 2019-10-08
* Bump up `mime_guess` crate version to 2.0.1
* Bump up `percent-encoding` crate version to 2.1
* Allow user defined request guards for `Files` #1113
## [0.1.4] - 2019-07-20
* Allow to disable `Content-Disposition` header #686
## [0.1.3] - 2019-06-28
* Do not set `Content-Length` header, let actix-http set it #930
## [0.1.2] - 2019-06-13
* Content-Length is 0 for NamedFile HEAD request #914
* Fix ring dependency from actix-web default features for #741
## [0.1.1] - 2019-06-01
* Static files are incorrectly served as both chunked and with length #812
## [0.1.0] - 2019-05-25
* NamedFile last-modified check always fails due to nano-seconds
in file modified date #820
## [0.1.0-beta.4] - 2019-05-12
* Update actix-web to beta.4
## [0.1.0-beta.1] - 2019-04-20
* Update actix-web to beta.1
## [0.1.0-alpha.6] - 2019-04-14
* Update actix-web to alpha6
## [0.1.0-alpha.4] - 2019-04-08
* Update actix-web to alpha4
## [0.1.0-alpha.2] - 2019-04-02
* Add default handler support
## [0.1.0-alpha.1] - 2019-03-28
* Initial impl

View File

@ -1,36 +1,36 @@
[package]
name = "actix-files"
version = "0.1.0-alpha.1"
version = "0.3.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Static files support for Actix web."
description = "Static files support for actix web."
readme = "README.md"
keywords = ["actix", "http", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-files/"
categories = ["asynchronous", "web-programming::http-server"]
license = "MIT/Apache-2.0"
license = "MIT OR Apache-2.0"
edition = "2018"
workspace = ".."
[lib]
name = "actix_files"
path = "src/lib.rs"
[dependencies]
actix-web = { path=".." }
actix-http = { path="../actix-http" }
actix-service = "0.3.3"
actix-web = { version = "3.0.0", default-features = false }
actix-http = "2.0.0"
actix-service = "1.0.6"
bitflags = "1"
bytes = "0.4"
futures = "0.1"
derive_more = "0.14"
bytes = "0.5.3"
futures-core = { version = "0.3.5", default-features = false }
futures-util = { version = "0.3.5", default-features = false }
derive_more = "0.99.2"
log = "0.4"
mime = "0.3"
mime_guess = "2.0.0-alpha"
percent-encoding = "1.0"
v_htmlescape = "0.4"
mime_guess = "2.0.1"
percent-encoding = "2.1"
v_htmlescape = "0.10"
[dev-dependencies]
actix-web = { path="..", features=["ssl"] }
actix-rt = "1.0.0"
actix-web = { version = "3.0.0", features = ["openssl"] }

actix-files/LICENSE-APACHE Symbolic link
View File

@ -0,0 +1 @@
../LICENSE-APACHE

actix-files/LICENSE-MIT Symbolic link
View File

@ -0,0 +1 @@
../LICENSE-MIT

View File

@ -1,82 +1,9 @@
# Actix web [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![Build status](https://ci.appveyor.com/api/projects/status/kkdb4yce7qhm5w85/branch/master?svg=true)](https://ci.appveyor.com/project/fafhrd91/actix-web-hdy9d/branch/master) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-web)](https://crates.io/crates/actix-web) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
Actix web is a simple, pragmatic and extremely fast web framework for Rust.
* Supported *HTTP/1.x* and [*HTTP/2.0*](https://actix.rs/docs/http2/) protocols
* Streaming and pipelining
* Keep-alive and slow requests handling
* Client/server [WebSockets](https://actix.rs/docs/websockets/) support
* Transparent content compression/decompression (br, gzip, deflate)
* Configurable [request routing](https://actix.rs/docs/url-dispatch/)
* Multipart streams
* Static assets
* SSL support with OpenSSL or `native-tls`
* Middlewares ([Logger, Session, CORS, CSRF, etc](https://actix.rs/docs/middleware/))
* Includes an asynchronous [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
* Built on top of [Actix actor framework](https://github.com/actix/actix)
* Experimental [Async/Await](https://github.com/mehcode/actix-web-async-await) support.
# Static files support for actix web [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-files)](https://crates.io/crates/actix-files) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
## Documentation & community resources
* [User Guide](https://actix.rs/docs/)
* [API Documentation (Development)](https://actix.rs/actix-web/actix_web/)
* [API Documentation (Releases)](https://actix.rs/api/actix-web/stable/actix_web/)
* [API Documentation](https://docs.rs/actix-files/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-web](https://crates.io/crates/actix-web)
* Minimum supported Rust version: 1.31 or later
## Example
```rust
extern crate actix_web;
use actix_web::{http, server, App, Path, Responder};
fn index(info: Path<(u32, String)>) -> impl Responder {
format!("Hello {}! id:{}", info.1, info.0)
}
fn main() {
server::new(
|| App::new()
.route("/{id}/{name}/index.html", http::Method::GET, index))
.bind("127.0.0.1:8080").unwrap()
.run();
}
```
### More examples
* [Basics](https://github.com/actix/examples/tree/master/basics/)
* [Stateful](https://github.com/actix/examples/tree/master/state/)
* [Protobuf support](https://github.com/actix/examples/tree/master/protobuf/)
* [Multipart streams](https://github.com/actix/examples/tree/master/multipart/)
* [Simple websocket](https://github.com/actix/examples/tree/master/websocket/)
* [Tera](https://github.com/actix/examples/tree/master/template_tera/) /
[Askama](https://github.com/actix/examples/tree/master/template_askama/) templates
* [Diesel integration](https://github.com/actix/examples/tree/master/diesel/)
* [r2d2](https://github.com/actix/examples/tree/master/r2d2/)
* [SSL / HTTP/2.0](https://github.com/actix/examples/tree/master/tls/)
* [Tcp/Websocket chat](https://github.com/actix/examples/tree/master/websocket-chat/)
* [Json](https://github.com/actix/examples/tree/master/json/)
You may consider checking out
[this directory](https://github.com/actix/examples/tree/master/) for more examples.
## Benchmarks
* [TechEmpower Framework Benchmark](https://www.techempower.com/benchmarks/#section=data-r16&hw=ph&test=plaintext)
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
* MIT license ([LICENSE-MIT](LICENSE-MIT) or [http://opensource.org/licenses/MIT](http://opensource.org/licenses/MIT))
at your option.
## Code of Conduct
Contribution to the actix-web crate is organized under the terms of the
Contributor Covenant, the maintainer of actix-web, @fafhrd91, promises to
intervene to uphold that code of conduct.
* Cargo package: [actix-files](https://crates.io/crates/actix-files)
* Minimum supported Rust version: 1.40 or later

View File

@ -5,6 +5,7 @@ use derive_more::Display;
#[derive(Display, Debug, PartialEq)]
pub enum FilesError {
/// Path is not a directory
#[allow(dead_code)]
#[display(fmt = "Path is not a directory. Unable to serve static files")]
IsNotDirectory,
@ -35,7 +36,7 @@ pub enum UriSegmentError {
/// Return `BadRequest` for `UriSegmentError`
impl ResponseError for UriSegmentError {
fn error_response(&self) -> HttpResponse {
HttpResponse::new(StatusCode::BAD_REQUEST)
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
}
}

File diff suppressed because it is too large

View File

@ -8,23 +8,25 @@ use std::time::{SystemTime, UNIX_EPOCH};
use std::os::unix::fs::MetadataExt;
use bitflags::bitflags;
use mime;
use mime_guess::guess_mime_type;
use mime_guess::from_path;
use actix_http::body::SizedStream;
use actix_web::dev::BodyEncoding;
use actix_web::http::header::{
self, ContentDisposition, DispositionParam, DispositionType,
self, Charset, ContentDisposition, DispositionParam, DispositionType, ExtendedValue,
};
use actix_web::http::{ContentEncoding, Method, StatusCode};
use actix_web::middleware::encoding::BodyEncoding;
use actix_web::http::{ContentEncoding, StatusCode};
use actix_web::{Error, HttpMessage, HttpRequest, HttpResponse, Responder};
use futures_util::future::{ready, Ready};
use crate::range::HttpRange;
use crate::ChunkedReadFile;
bitflags! {
pub(crate) struct Flags: u32 {
const ETAG = 0b00000001;
const LAST_MD = 0b00000010;
pub(crate) struct Flags: u8 {
const ETAG = 0b0000_0001;
const LAST_MD = 0b0000_0010;
const CONTENT_DISPOSITION = 0b0000_0100;
}
}
@ -39,13 +41,13 @@ impl Default for Flags {
pub struct NamedFile {
path: PathBuf,
file: File,
modified: Option<SystemTime>,
pub(crate) md: Metadata,
pub(crate) flags: Flags,
pub(crate) status_code: StatusCode,
pub(crate) content_type: mime::Mime,
pub(crate) content_disposition: header::ContentDisposition,
pub(crate) md: Metadata,
modified: Option<SystemTime>,
encoding: Option<ContentEncoding>,
pub(crate) status_code: StatusCode,
pub(crate) flags: Flags,
pub(crate) encoding: Option<ContentEncoding>,
}
impl NamedFile {
@ -66,6 +68,7 @@ impl NamedFile {
/// let mut file = File::create("foo.txt")?;
/// file.write_all(b"Hello, world!")?;
/// let named_file = NamedFile::from_file(file, "bar.txt")?;
/// # std::fs::remove_file("foo.txt");
/// Ok(())
/// }
/// ```
@ -85,14 +88,23 @@ impl NamedFile {
}
};
let ct = guess_mime_type(&path);
let disposition_type = match ct.type_() {
let ct = from_path(&path).first_or_octet_stream();
let disposition = match ct.type_() {
mime::IMAGE | mime::TEXT | mime::VIDEO => DispositionType::Inline,
_ => DispositionType::Attachment,
};
let mut parameters =
vec![DispositionParam::Filename(String::from(filename.as_ref()))];
if !filename.is_ascii() {
parameters.push(DispositionParam::FilenameExt(ExtendedValue {
charset: Charset::Ext(String::from("UTF-8")),
language_tag: None,
value: filename.into_owned().into_bytes(),
}))
}
let cd = ContentDisposition {
disposition: disposition_type,
parameters: vec![DispositionParam::Filename(filename.into_owned())],
disposition,
parameters,
};
(ct, cd)
};
@ -170,11 +182,21 @@ impl NamedFile {
/// sent to the peer. By default the disposition is `inline` for text,
/// image, and video content types, and `attachment` otherwise, and
/// the filename is taken from the path provided in the `open` method
/// after converting it to UTF-8 using
/// after converting it to UTF-8 using.
/// [to_string_lossy](https://doc.rust-lang.org/std/ffi/struct.OsStr.html#method.to_string_lossy).
#[inline]
pub fn set_content_disposition(mut self, cd: header::ContentDisposition) -> Self {
self.content_disposition = cd;
self.flags.insert(Flags::CONTENT_DISPOSITION);
self
}
/// Disable `Content-Disposition` header.
///
/// By default the `Content-Disposition` header is enabled.
#[inline]
pub fn disable_content_disposition(mut self) -> Self {
self.flags.remove(Flags::CONTENT_DISPOSITION);
self
}
@ -233,6 +255,145 @@ impl NamedFile {
pub(crate) fn last_modified(&self) -> Option<header::HttpDate> {
self.modified.map(|mtime| mtime.into())
}
pub fn into_response(self, req: &HttpRequest) -> Result<HttpResponse, Error> {
if self.status_code != StatusCode::OK {
let mut resp = HttpResponse::build(self.status_code);
resp.set(header::ContentType(self.content_type.clone()))
.if_true(self.flags.contains(Flags::CONTENT_DISPOSITION), |res| {
res.header(
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
);
});
if let Some(current_encoding) = self.encoding {
resp.encoding(current_encoding);
}
let reader = ChunkedReadFile {
size: self.md.len(),
offset: 0,
file: Some(self.file),
fut: None,
counter: 0,
};
return Ok(resp.streaming(reader));
}
let etag = if self.flags.contains(Flags::ETAG) {
self.etag()
} else {
None
};
let last_modified = if self.flags.contains(Flags::LAST_MD) {
self.last_modified()
} else {
None
};
// check preconditions
let precondition_failed = if !any_match(etag.as_ref(), req) {
true
} else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) =
(last_modified, req.get_header())
{
let t1: SystemTime = m.clone().into();
let t2: SystemTime = since.clone().into();
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
(Ok(t1), Ok(t2)) => t1 > t2,
_ => false,
}
} else {
false
};
// check last modified
let not_modified = if !none_match(etag.as_ref(), req) {
true
} else if req.headers().contains_key(&header::IF_NONE_MATCH) {
false
} else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) =
(last_modified, req.get_header())
{
let t1: SystemTime = m.clone().into();
let t2: SystemTime = since.clone().into();
match (t1.duration_since(UNIX_EPOCH), t2.duration_since(UNIX_EPOCH)) {
(Ok(t1), Ok(t2)) => t1 <= t2,
_ => false,
}
} else {
false
};
let mut resp = HttpResponse::build(self.status_code);
resp.set(header::ContentType(self.content_type.clone()))
.if_true(self.flags.contains(Flags::CONTENT_DISPOSITION), |res| {
res.header(
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
);
});
// default compressing
if let Some(current_encoding) = self.encoding {
resp.encoding(current_encoding);
}
resp.if_some(last_modified, |lm, resp| {
resp.set(header::LastModified(lm));
})
.if_some(etag, |etag, resp| {
resp.set(header::ETag(etag));
});
resp.header(header::ACCEPT_RANGES, "bytes");
let mut length = self.md.len();
let mut offset = 0;
// check for range header
if let Some(ranges) = req.headers().get(&header::RANGE) {
if let Ok(rangesheader) = ranges.to_str() {
if let Ok(rangesvec) = HttpRange::parse(rangesheader, length) {
length = rangesvec[0].length;
offset = rangesvec[0].start;
resp.encoding(ContentEncoding::Identity);
resp.header(
header::CONTENT_RANGE,
format!(
"bytes {}-{}/{}",
offset,
offset + length - 1,
self.md.len()
),
);
} else {
resp.header(header::CONTENT_RANGE, format!("bytes */{}", length));
return Ok(resp.status(StatusCode::RANGE_NOT_SATISFIABLE).finish());
};
} else {
return Ok(resp.status(StatusCode::BAD_REQUEST).finish());
};
};
if precondition_failed {
return Ok(resp.status(StatusCode::PRECONDITION_FAILED).finish());
} else if not_modified {
return Ok(resp.status(StatusCode::NOT_MODIFIED).finish());
}
let reader = ChunkedReadFile {
offset,
size: length,
file: Some(self.file),
fut: None,
counter: 0,
};
if offset != 0 || length != self.md.len() {
resp.status(StatusCode::PARTIAL_CONTENT);
}
Ok(resp.body(SizedStream::new(length, reader)))
}
}
impl Deref for NamedFile {
@ -286,145 +447,9 @@ fn none_match(etag: Option<&header::EntityTag>, req: &HttpRequest) -> bool {
impl Responder for NamedFile {
type Error = Error;
type Future = Result<HttpResponse, Error>;
type Future = Ready<Result<HttpResponse, Error>>;
fn respond_to(self, req: &HttpRequest) -> Self::Future {
if self.status_code != StatusCode::OK {
let mut resp = HttpResponse::build(self.status_code);
resp.set(header::ContentType(self.content_type.clone()))
.header(
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
);
// TODO blocking by compressing
// if let Some(current_encoding) = self.encoding {
// resp.content_encoding(current_encoding);
// }
let reader = ChunkedReadFile {
size: self.md.len(),
offset: 0,
file: Some(self.file),
fut: None,
counter: 0,
};
return Ok(resp.streaming(reader));
}
match req.method() {
&Method::HEAD | &Method::GET => (),
_ => {
return Ok(HttpResponse::MethodNotAllowed()
.header(header::CONTENT_TYPE, "text/plain")
.header(header::ALLOW, "GET, HEAD")
.body("This resource only supports GET and HEAD."));
}
}
let etag = if self.flags.contains(Flags::ETAG) {
self.etag()
} else {
None
};
let last_modified = if self.flags.contains(Flags::LAST_MD) {
self.last_modified()
} else {
None
};
// check preconditions
let precondition_failed = if !any_match(etag.as_ref(), req) {
true
} else if let (Some(ref m), Some(header::IfUnmodifiedSince(ref since))) =
(last_modified, req.get_header())
{
m > since
} else {
false
};
// check last modified
let not_modified = if !none_match(etag.as_ref(), req) {
true
} else if req.headers().contains_key(header::IF_NONE_MATCH) {
false
} else if let (Some(ref m), Some(header::IfModifiedSince(ref since))) =
(last_modified, req.get_header())
{
m <= since
} else {
false
};
let mut resp = HttpResponse::build(self.status_code);
resp.set(header::ContentType(self.content_type.clone()))
.header(
header::CONTENT_DISPOSITION,
self.content_disposition.to_string(),
);
// default compressing
if let Some(current_encoding) = self.encoding {
resp.encoding(current_encoding);
}
resp.if_some(last_modified, |lm, resp| {
resp.set(header::LastModified(lm));
})
.if_some(etag, |etag, resp| {
resp.set(header::ETag(etag));
});
resp.header(header::ACCEPT_RANGES, "bytes");
let mut length = self.md.len();
let mut offset = 0;
// check for range header
if let Some(ranges) = req.headers().get(header::RANGE) {
if let Ok(rangesheader) = ranges.to_str() {
if let Ok(rangesvec) = HttpRange::parse(rangesheader, length) {
length = rangesvec[0].length;
offset = rangesvec[0].start;
resp.encoding(ContentEncoding::Identity);
resp.header(
header::CONTENT_RANGE,
format!(
"bytes {}-{}/{}",
offset,
offset + length - 1,
self.md.len()
),
);
} else {
resp.header(header::CONTENT_RANGE, format!("bytes */{}", length));
return Ok(resp.status(StatusCode::RANGE_NOT_SATISFIABLE).finish());
};
} else {
return Ok(resp.status(StatusCode::BAD_REQUEST).finish());
};
};
resp.header(header::CONTENT_LENGTH, format!("{}", length));
if precondition_failed {
return Ok(resp.status(StatusCode::PRECONDITION_FAILED).finish());
} else if not_modified {
return Ok(resp.status(StatusCode::NOT_MODIFIED).finish());
}
if *req.method() == Method::HEAD {
Ok(resp.finish())
} else {
let reader = ChunkedReadFile {
offset,
size: length,
file: Some(self.file),
fut: None,
counter: 0,
};
if offset != 0 || length != self.md.len() {
return Ok(resp.status(StatusCode::PARTIAL_CONTENT).streaming(reader));
};
Ok(resp.streaming(reader))
}
ready(self.into_response(req))
}
}

View File

@ -5,7 +5,7 @@ pub struct HttpRange {
pub length: u64,
}
static PREFIX: &'static str = "bytes=";
static PREFIX: &str = "bytes=";
const PREFIX_LEN: usize = 6;
impl HttpRange {

View File

@ -1,41 +0,0 @@
environment:
global:
PROJECT_NAME: actix-http
matrix:
# Stable channel
- TARGET: i686-pc-windows-msvc
CHANNEL: stable
- TARGET: x86_64-pc-windows-gnu
CHANNEL: stable
- TARGET: x86_64-pc-windows-msvc
CHANNEL: stable
# Nightly channel
- TARGET: i686-pc-windows-msvc
CHANNEL: nightly
- TARGET: x86_64-pc-windows-gnu
CHANNEL: nightly
- TARGET: x86_64-pc-windows-msvc
CHANNEL: nightly
# Install Rust and Cargo
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
install:
- ps: >-
If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
$Env:PATH += ';C:\msys64\mingw64\bin'
} ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
$Env:PATH += ';C:\MinGW\bin'
}
- curl -sSf -o rustup-init.exe https://win.rustup.rs
- rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
- set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
- rustc -Vv
- cargo -V
# 'cargo test' takes care of building for us, so disable Appveyor's build stage.
build: false
# Equivalent to Travis' `script` phase
test_script:
- cargo clean
- cargo test

View File

@ -1,5 +1,416 @@
# Changes
## Unreleased - 2020-xx-xx
## 2.0.0 - 2020-09-11
* No significant changes from `2.0.0-beta.4`.
## 2.0.0-beta.4 - 2020-09-09
### Changed
* Update actix-codec and actix-utils dependencies.
* Update actix-connect and actix-tls dependencies.
## [2.0.0-beta.3] - 2020-08-14
### Fixed
* Memory leak of `client::pool::ConnectorPoolSupport`. [#1626]
[#1626]: https://github.com/actix/actix-web/pull/1626
## [2.0.0-beta.2] - 2020-07-21
### Fixed
* Potential UB in h1 decoder using uninitialized memory. [#1614]
### Changed
* Fix illegal chunked encoding. [#1615]
[#1614]: https://github.com/actix/actix-web/pull/1614
[#1615]: https://github.com/actix/actix-web/pull/1615
## [2.0.0-beta.1] - 2020-07-11
### Changed
* Migrate cookie handling to `cookie` crate. [#1558]
* Update `sha-1` to 0.9. [#1586]
* Fix leak in client pool. [#1580]
* MSRV is now 1.41.1.
[#1558]: https://github.com/actix/actix-web/pull/1558
[#1586]: https://github.com/actix/actix-web/pull/1586
[#1580]: https://github.com/actix/actix-web/pull/1580
## [2.0.0-alpha.4] - 2020-05-21
### Changed
* Bump minimum supported Rust version to 1.40
* content_length function is removed, and you can set Content-Length by calling no_chunking function [#1439]
* `BodySize::Sized64` variant has been removed. `BodySize::Sized` now receives a
`u64` instead of a `usize`.
* Update `base64` dependency to 0.12
### Fixed
* Support parsing of `SameSite=None` [#1503]
[#1439]: https://github.com/actix/actix-web/pull/1439
[#1503]: https://github.com/actix/actix-web/pull/1503
## [2.0.0-alpha.3] - 2020-05-08
### Fixed
* Correct spelling of ConnectError::Unresolved [#1487]
* Fix a mistake in the encoding of websocket continuation messages wherein
Item::FirstText and Item::FirstBinary are each encoded as the other.
### Changed
* Implement `std::error::Error` for our custom errors [#1422]
* Remove `failure` support for `ResponseError` since that crate
will be deprecated in the near future.
[#1422]: https://github.com/actix/actix-web/pull/1422
[#1487]: https://github.com/actix/actix-web/pull/1487
## [2.0.0-alpha.2] - 2020-03-07
### Changed
* Update `actix-connect` and `actix-tls` dependency to 2.0.0-alpha.1. [#1395]
* Change default initial window size and connection window size for HTTP2 to 2MB and 1MB respectively
to improve download speed for awc when downloading large objects. [#1394]
* client::Connector accepts initial_window_size and initial_connection_window_size HTTP2 configuration. [#1394]
* client::Connector allowing to set max_http_version to limit HTTP version to be used. [#1394]
[#1394]: https://github.com/actix/actix-web/pull/1394
[#1395]: https://github.com/actix/actix-web/pull/1395
## [2.0.0-alpha.1] - 2020-02-27
### Changed
* Update the `time` dependency to 0.2.7.
* Moved actors messages support from actix crate, enabled with feature `actors`.
* Breaking change: the `MessageBody` trait now requires `Unpin`, and `poll_next()` accepts `Pin<&mut Self>` instead of `&mut self` (see the sketch after this release's entries).
* MessageBody is not implemented for &'static [u8] anymore.
### Fixed
* Allow `SameSite=None` cookies to be sent in a response.
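To illustrate the new trait shape, here is a minimal hand-rolled body written against the 2.0 API that appears later in this diff; the `OnceBody` type is invented for the example, and the `actix_http::body` paths are assumed.
```rust
use std::pin::Pin;
use std::task::{Context, Poll};

use actix_http::body::{BodySize, MessageBody};
use actix_http::Error;
use bytes::Bytes;

// Hypothetical one-shot body, used only to show the new `Pin<&mut Self>` signature.
struct OnceBody(Option<Bytes>);

impl MessageBody for OnceBody {
    fn size(&self) -> BodySize {
        BodySize::Sized(self.0.as_ref().map_or(0, |b| b.len() as u64))
    }

    // Previously: `fn poll_next(&mut self) -> Poll<Option<Bytes>, Error>`.
    fn poll_next(
        self: Pin<&mut Self>,
        _cx: &mut Context<'_>,
    ) -> Poll<Option<Result<Bytes, Error>>> {
        // `OnceBody` only contains `Unpin` data, so `get_mut` is fine here.
        Poll::Ready(self.get_mut().0.take().map(Ok))
    }
}
```
In the final 2.0 code shown later in this diff, `Body::from_message` takes `MessageBody + Unpin + 'static`, so a body like this can still be boxed into a `Body`.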
## [1.0.1] - 2019-12-20
### Fixed
* Poll upgrade service's readiness from HTTP service handlers
* Replace brotli with brotli2 #1224
## [1.0.0] - 2019-12-13
### Added
* Add websockets continuation frame support
### Changed
* Replace `flate2-xxx` features with `compress`
## [1.0.0-alpha.5] - 2019-12-09
### Fixed
* Check `Upgrade` service readiness before calling it
* Fix buffer remaining capacity calculation
### Changed
* Websockets: Ping and Pong should have binary data #1049
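A small illustration of the entry above, assuming the `actix_http::ws::Message` enum exposes `Ping`/`Pong` variants carrying `Bytes`:
```rust
use actix_http::ws::Message;
use bytes::Bytes;

fn main() {
    // Ping and Pong frames now carry their application data as bytes.
    let ping = Message::Ping(Bytes::from_static(b"heartbeat"));
    let pong = Message::Pong(Bytes::from_static(b"heartbeat"));
    assert!(matches!(ping, Message::Ping(_)));
    assert!(matches!(pong, Message::Pong(_)));
}
```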
## [1.0.0-alpha.4] - 2019-12-08
### Added
* Add impl ResponseBuilder for Error
### Changed
* Use rust based brotli compression library
## [1.0.0-alpha.3] - 2019-12-07
### Changed
* Migrate to tokio 0.2
* Migrate to `std::future`
## [0.2.11] - 2019-11-06
### Added
* Add support for `serde_json::Value` to be passed as an argument to `ResponseBuilder.body()` (see the sketch after this release's entries)
* Add an additional `filename*` param in the `Content-Disposition` header of `actix_files::NamedFile` to be more compatible. (#1151)
* Allow to use `std::convert::Infallible` as `actix_http::error::Error`
### Fixed
* To be compatible with non-English error responses, `ResponseError` is now rendered with a `text/plain; charset=utf-8` header #1118
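A quick sketch of the builder usage this enables, written against the current API in this diff; the `From<serde_json::Value> for Body` impl shown in `body.rs` below performs the conversion.
```rust
use actix_http::Response;
use serde_json::json;

fn main() {
    // The builder's `body()` accepts anything convertible into `Body`,
    // which now includes `serde_json::Value`.
    let res = Response::Ok().body(json!({ "hello": "world" }));
    assert!(res.status().is_success());
}
```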
## [0.2.10] - 2019-09-11
### Added
* Add support for sending HTTP requests with `Rc<RequestHead>` in addition to sending HTTP requests with `RequestHead`
### Fixed
* h2 will use error response #1080
* on_connect result isn't added to request extensions for http2 requests #1009
## [0.2.9] - 2019-08-13
### Changed
* Dropped the `byteorder` dependency in favor of a `stdlib` implementation
* Update percent-encoding to 2.1
* Update serde_urlencoded to 0.6.1
### Fixed
* Fixed a panic in the HTTP2 handshake in client HTTP requests (#1031)
## [0.2.8] - 2019-08-01
### Added
* Add `rustls` support
* Add `Clone` impl for `HeaderMap`
### Fixed
* awc client panic #1016
* Invalid response with compression middleware enabled, but compression-related features disabled #997
## [0.2.7] - 2019-07-18
### Added
* Add support for downcasting response errors #986
## [0.2.6] - 2019-07-17
### Changed
* Replace `ClonableService` with local copy
* Upgrade `rand` dependency version to 0.7
## [0.2.5] - 2019-06-28
### Added
* Add `on-connect` callback, `HttpServiceBuilder::on_connect()` #946
### Changed
* Use `encoding_rs` crate instead of unmaintained `encoding` crate
* Add `Copy` and `Clone` impls for `ws::Codec`
## [0.2.4] - 2019-06-16
### Fixed
* Do not compress NoContent (204) responses #918
## [0.2.3] - 2019-06-02
### Added
* Debug impl for ResponseBuilder
* `From<SizedStream>` and `From<BodyStream>` impls for `Body` (see the sketch after this release's entries)
### Changed
* SizedStream uses u64
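A brief sketch of the two conversions, written against the 2.0 stream bounds shown later in this diff (`Result<Bytes, E>` items on an `Unpin` stream):
```rust
use actix_http::body::{Body, BodyStream, SizedStream};
use actix_http::Error;
use bytes::Bytes;
use futures_util::stream;

fn main() {
    // Chunked body: length unknown, transfer encoding chosen by the framework.
    let chunks = stream::iter(vec![Ok::<_, Error>(Bytes::from_static(b"hello "))]);
    let _chunked: Body = BodyStream::new(chunks).into();

    // Sized body: total length known up front, data sent as-is.
    let chunks = stream::iter(vec![Ok::<_, Error>(Bytes::from_static(b"world"))]);
    let _sized: Body = SizedStream::new(5, chunks).into();
}
```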
## [0.2.2] - 2019-05-29
### Fixed
* Parse incoming stream before closing stream on disconnect #868
## [0.2.1] - 2019-05-25
### Fixed
* Handle socket read disconnect
## [0.2.0] - 2019-05-12
### Changed
* Update actix-service to 0.4
* Expect and upgrade services accept `ServerConfig` config.
### Deleted
* `OneRequest` service
## [0.1.5] - 2019-05-04
### Fixed
* Clean up response extensions in response pool #817
## [0.1.4] - 2019-04-24
### Added
* Allow to render h1 request headers in `Camel-Case`
### Fixed
* Read until eof for http/1.0 responses #771
## [0.1.3] - 2019-04-23
### Fixed
* Fix http client pool management
* Fix http client wait queue management #794
## [0.1.2] - 2019-04-23
### Fixed
* Fix BorrowMutError panic in client connector #793
## [0.1.1] - 2019-04-19
### Changed
* Cookie::max_age() accepts value in seconds
* Cookie::max_age_time() accepts value in time::Duration
* Allow to specify server address for client connector
## [0.1.0] - 2019-04-16
### Added
* Expose peer addr via `Request::peer_addr()` and `RequestHead::peer_addr`
### Changed
* `actix_http::encoding` always available
* use trust-dns-resolver 0.11.0
## [0.1.0-alpha.5] - 2019-04-12
### Added
* Allow to use custom service for upgrade requests
* Added `h1::SendResponse` future.
### Changed
* MessageBody::length() renamed to MessageBody::size() for consistency
* ws handshake verification functions take RequestHead instead of Request
## [0.1.0-alpha.4] - 2019-04-08
### Added
* Allow to use custom `Expect` handler
* Add minimal `std::error::Error` impl for `Error`
### Changed
* Export IntoHeaderValue
* Render error and return as response body
* Use thread pool for response body compression
### Deleted
* Removed PayloadBuffer
## [0.1.0-alpha.3] - 2019-04-02
### Added
* Warn when an unsealed private cookie isn't valid UTF-8
### Fixed
* Rust 1.31.0 compatibility
* Preallocate read buffer for h1 codec
* Detect socket disconnection during protocol selection
## [0.1.0-alpha.2] - 2019-03-29
### Added
* Added ws::Message::Nop, no-op websockets message
### Changed
* Do not use thread pool for decompression if chunk size is smaller than 2048 bytes.
## [0.1.0-alpha.1] - 2019-03-28
* Initial impl


@ -1,26 +1,21 @@
[package]
name = "actix-http"
version = "0.1.0-alpha.1"
version = "2.0.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix http primitives"
description = "Actix HTTP primitives"
readme = "README.md"
keywords = ["actix", "http", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-http.git"
repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-http/"
categories = ["network-programming", "asynchronous",
"web-programming::http-server",
"web-programming::websocket"]
license = "MIT/Apache-2.0"
license = "MIT OR Apache-2.0"
edition = "2018"
workspace = ".."
[package.metadata.docs.rs]
features = ["ssl", "fail", "cookie", "brotli", "flate2-zlib"]
[badges]
travis-ci = { repository = "actix/actix-web", branch = "master" }
codecov = { repository = "actix/actix-web", branch = "master", service = "github" }
features = ["openssl", "rustls", "compress", "secure-cookies", "actors"]
[lib]
name = "actix_http"
@ -30,77 +25,85 @@ path = "src/lib.rs"
default = []
# openssl
ssl = ["openssl", "actix-connect/ssl"]
openssl = ["actix-tls/openssl", "actix-connect/openssl"]
# cookies integration
cookies = ["cookie"]
# rustls support
rustls = ["actix-tls/rustls", "actix-connect/rustls"]
# brotli encoding, requires c compiler
brotli = ["brotli2"]
# enable compression support
compress = ["flate2", "brotli2"]
# miniz-sys backend for flate2 crate
flate2-zlib = ["flate2/miniz-sys"]
# support for secure cookies
secure-cookies = ["cookie/secure"]
# rust backend for flate2 crate
flate2-rust = ["flate2/rust_backend"]
# failure integration. actix does not use failure anymore
fail = ["failure"]
# support for actix Actor messages
actors = ["actix"]
[dependencies]
actix-service = "0.3.4"
actix-codec = "0.1.2"
actix-connect = "0.1.0"
actix-utils = "0.3.4"
actix-server-config = "0.1.0"
actix-threadpool = "0.1.0"
actix-service = "1.0.6"
actix-codec = "0.3.0"
actix-connect = "2.0.0"
actix-utils = "2.0.0"
actix-rt = "1.0.0"
actix-threadpool = "0.3.1"
actix-tls = { version = "2.0.0", optional = true }
actix = { version = "0.10.0", optional = true }
base64 = "0.10"
bitflags = "1.0"
bytes = "0.4"
byteorder = "1.2"
derive_more = "0.14"
encoding = "0.2"
futures = "0.1"
hashbrown = "0.1.8"
h2 = "0.1.16"
http = "0.1.16"
base64 = "0.12"
bitflags = "1.2"
bytes = "0.5.3"
cookie = { version = "0.14.1", features = ["percent-encode"] }
copyless = "0.1.4"
derive_more = "0.99.2"
either = "1.5.3"
encoding_rs = "0.8"
futures-channel = { version = "0.3.5", default-features = false }
futures-core = { version = "0.3.5", default-features = false }
futures-util = { version = "0.3.5", default-features = false }
fxhash = "0.2.1"
h2 = "0.2.1"
http = "0.2.0"
httparse = "1.3"
indexmap = "1.0"
lazy_static = "1.0"
indexmap = "1.3"
itoa = "0.4"
lazy_static = "1.4"
language-tags = "0.2"
log = "0.4"
mime = "0.3"
percent-encoding = "1.0"
rand = "0.6"
regex = "1.0"
percent-encoding = "2.1"
pin-project = "0.4.17"
rand = "0.7"
regex = "1.3"
serde = "1.0"
serde_json = "1.0"
sha1 = "0.6"
sha-1 = "0.9"
slab = "0.4"
serde_urlencoded = "0.5.3"
time = "0.1"
tokio-tcp = "0.1.3"
tokio-timer = "0.2"
tokio-current-thread = "0.1"
trust-dns-resolver = { version="0.11.0-alpha.2", default-features = false }
serde_urlencoded = "0.6.1"
time = { version = "0.2.7", default-features = false, features = ["std"] }
# compression
brotli2 = { version="^0.3.2", optional = true }
flate2 = { version="^1.0.2", optional = true, default-features = false }
# optional deps
cookie = { version="0.11", features=["percent-encode"], optional = true }
failure = { version = "0.1.5", optional = true }
openssl = { version="0.10", optional = true }
brotli2 = { version="0.3.2", optional = true }
flate2 = { version = "1.0.13", optional = true }
[dev-dependencies]
actix-rt = "0.2.2"
actix-server = { version = "0.4.0", features=["ssl"] }
actix-connect = { version = "0.1.0", features=["ssl"] }
actix-http-test = { path="../test-server", features=["ssl"] }
env_logger = "0.6"
actix-server = "1.0.1"
actix-connect = { version = "2.0.0", features = ["openssl"] }
actix-http-test = { version = "2.0.0", features = ["openssl"] }
actix-tls = { version = "2.0.0", features = ["openssl"] }
criterion = "0.3"
env_logger = "0.7"
serde_derive = "1.0"
openssl = { version="0.10" }
tokio-tcp = "0.1"
open-ssl = { version="0.10", package = "openssl" }
rust-tls = { version="0.18", package = "rustls" }
[[bench]]
name = "content-length"
harness = false
[[bench]]
name = "status-line"
harness = false
[[bench]]
name = "uninit-headers"
harness = false


@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017-NOW Nikolay Kim
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

actix-http/LICENSE-APACHE Symbolic link

@ -0,0 +1 @@
../LICENSE-APACHE


@ -1,25 +0,0 @@
Copyright (c) 2017 Nikolay Kim
Permission is hereby granted, free of charge, to any
person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the
Software without restriction, including without
limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software
is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice
shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED
TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR
IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.

actix-http/LICENSE-MIT Symbolic link

@ -0,0 +1 @@
../LICENSE-MIT


@ -1,4 +1,4 @@
# Actix http [![Build Status](https://travis-ci.org/actix/actix-http.svg?branch=master)](https://travis-ci.org/actix/actix-http) [![codecov](https://codecov.io/gh/actix/actix-http/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-http) [![crates.io](https://meritbadge.herokuapp.com/actix-web)](https://crates.io/crates/actix-web) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# Actix http [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-http)](https://crates.io/crates/actix-http) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
Actix http
@ -8,25 +8,40 @@ Actix http
* [API Documentation](https://docs.rs/actix-http/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-http](https://crates.io/crates/actix-http)
* Minimum supported Rust version: 1.26 or later
* Minimum supported Rust version: 1.40 or later
## Example
```rust
// see examples/framed_hello.rs for complete list of used crates.
extern crate actix_http;
use actix_http::{h1, Response, ServiceConfig};
use std::{env, io};
fn main() {
Server::new().bind("framed_hello", "127.0.0.1:8080", || {
IntoFramed::new(|| h1::Codec::new(ServiceConfig::default())) // <- create h1 codec
.and_then(TakeItem::new().map_err(|_| ())) // <- read one request
.and_then(|(_req, _framed): (_, Framed<_, _>)| { // <- send response and close conn
SendResponse::send(_framed, Response::Ok().body("Hello world!"))
.map_err(|_| ())
.map(|_| ())
})
}).unwrap().run();
use actix_http::{HttpService, Response};
use actix_server::Server;
use futures::future;
use http::header::HeaderValue;
use log::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "hello_world=info");
env_logger::init();
Server::build()
.bind("hello-world", "127.0.0.1:8080", || {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
.finish(|_req| {
info!("{:?}", _req);
let mut res = Response::Ok();
res.header("x-head", HeaderValue::from_static("dummy value!"));
future::ok::<_, ()>(res.body("Hello world!"))
})
.tcp()
})?
.run()
.await
}
```


@ -0,0 +1,291 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use bytes::BytesMut;
// benchmark writing the `Content-Length` header for a range of body sizes
fn bench_write_content_length(c: &mut Criterion) {
let mut group = c.benchmark_group("write_content_length");
let sizes = [
0, 1, 11, 83, 101, 653, 1001, 6323, 10001, 56329, 100001, 123456, 98724245,
4294967202,
];
for i in sizes.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_content_length(i, &mut b)
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_content_length(i, &mut b)
})
});
group.bench_with_input(BenchmarkId::new("itoa", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_itoa::write_content_length(i, &mut b)
})
});
}
group.finish();
}
criterion_group!(benches, bench_write_content_length);
criterion_main!(benches);
mod _itoa {
use bytes::{BufMut, BytesMut};
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
let mut buf = itoa::Buffer::new();
bytes.put_slice(b"\r\ncontent-length: ");
bytes.put_slice(buf.format(n).as_bytes());
bytes.put_slice(b"\r\n");
}
}
mod _new {
use bytes::{BufMut, BytesMut};
const DIGITS_START: u8 = b'0';
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
bytes.put_slice(b"\r\ncontent-length: ");
if n < 10 {
bytes.put_u8(DIGITS_START + (n as u8));
} else if n < 100 {
let n = n as u8;
let d10 = n / 10;
let d1 = n % 10;
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1000 {
let n = n as u16;
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 10_000 {
let n = n as u16;
let d1000 = (n / 1000) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 100_000 {
let n = n as u32;
let d10000 = (n / 10000) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1_000_000 {
let n = n as u32;
let d100000 = (n / 100000) as u8;
let d10000 = ((n / 10000) % 10) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100000);
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else {
write_usize(n, bytes);
}
bytes.put_slice(b"\r\n");
}
fn write_usize(n: usize, bytes: &mut BytesMut) {
let mut n = n;
// 20 chars is max length of a usize (2^64)
// digits will be added to the buffer from lsd to msd
let mut buf = BytesMut::with_capacity(20);
while n > 9 {
// "pop" the least-significant digit
let lsd = (n % 10) as u8;
// remove the lsd from n
n = n / 10;
buf.put_u8(DIGITS_START + lsd);
}
// put msd to result buffer
bytes.put_u8(DIGITS_START + (n as u8));
// put, in reverse (msd to lsd), remaining digits to buffer
for i in (0..buf.len()).rev() {
bytes.put_u8(buf[i]);
}
}
}
mod _original {
use std::{mem, ptr, slice};
use bytes::{BufMut, BytesMut};
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
if n < 10 {
let mut buf: [u8; 21] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'\r', b'\n',
];
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else if n < 100 {
let mut buf: [u8; 22] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'\r', b'\n',
];
let d1 = n << 1;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(18),
2,
);
}
bytes.put_slice(&buf);
} else if n < 1000 {
let mut buf: [u8; 23] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'0', b'\r',
b'\n',
];
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(19),
2,
)
};
// decode last 1
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else {
bytes.put_slice(b"\r\ncontent-length: ");
convert_usize(n, bytes);
}
}
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
let mut curr: isize = 39;
let mut buf: [u8; 41] = unsafe { mem::MaybeUninit::uninit().assume_init() };
buf[39] = b'\r';
buf[40] = b'\n';
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
// eagerly decode 4 characters at a time
while n >= 10_000 {
let rem = (n % 10_000) as isize;
n /= 10_000;
let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1;
curr -= 4;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
ptr::copy_nonoverlapping(
lut_ptr.offset(d2),
buf_ptr.offset(curr + 2),
2,
);
}
}
// if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
unsafe {
bytes.extend_from_slice(slice::from_raw_parts(
buf_ptr.offset(curr),
41 - curr as usize,
));
}
}
}


@ -0,0 +1,222 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use bytes::BytesMut;
use http::Version;
const CODES: &[u16] = &[201, 303, 404, 515];
fn bench_write_status_line_11(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v1.1");
let version = Version::HTTP_11;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
fn bench_write_status_line_10(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v1.0");
let version = Version::HTTP_10;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
fn bench_write_status_line_09(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v0.9");
let version = Version::HTTP_09;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
criterion_group!(
benches,
bench_write_status_line_11,
bench_write_status_line_10,
bench_write_status_line_09
);
criterion_main!(benches);
mod _naive {
use bytes::{BufMut, BytesMut};
use http::Version;
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
bytes.put_slice(n.to_string().as_bytes());
}
}
mod _new {
use bytes::{BufMut, BytesMut};
use http::Version;
const DIGITS_START: u8 = b'0';
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
bytes.put_u8(b' ');
}
}
mod _original {
use std::ptr;
use bytes::{BufMut, BytesMut};
use http::Version;
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
pub(crate) const STATUS_LINE_BUF_SIZE: usize = 13;
pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) {
let mut buf: [u8; STATUS_LINE_BUF_SIZE] = *b"HTTP/1.1 ";
match version {
Version::HTTP_2 => buf[5] = b'2',
Version::HTTP_10 => buf[7] = b'0',
Version::HTTP_09 => {
buf[5] = b'0';
buf[7] = b'9';
}
_ => (),
}
let mut curr: isize = 12;
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let four = n > 999;
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
}
}
bytes.put_slice(&buf);
if four {
bytes.put_u8(b' ');
}
}
}


@ -0,0 +1,137 @@
use criterion::{criterion_group, criterion_main, Criterion};
use bytes::BytesMut;
// A Miri run detects UB, seen on this playground:
// https://play.rust-lang.org/?version=stable&mode=debug&edition=2018&gist=f5d9aa166aa48df8dca05fce2b6c3915
fn bench_header_parsing(c: &mut Criterion) {
c.bench_function("Original (Unsound) [short]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ_SHORT);
_original::parse_headers(&mut buf);
})
});
c.bench_function("New (safe) [short]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ_SHORT);
_new::parse_headers(&mut buf);
})
});
c.bench_function("Original (Unsound) [realistic]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ);
_original::parse_headers(&mut buf);
})
});
c.bench_function("New (safe) [realistic]", |b| {
b.iter(|| {
let mut buf = BytesMut::from(REQ);
_new::parse_headers(&mut buf);
})
});
}
criterion_group!(benches, bench_header_parsing);
criterion_main!(benches);
const MAX_HEADERS: usize = 96;
const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] =
[httparse::EMPTY_HEADER; MAX_HEADERS];
#[derive(Clone, Copy)]
struct HeaderIndex {
name: (usize, usize),
value: (usize, usize),
}
const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
name: (0, 0),
value: (0, 0),
};
const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] =
[EMPTY_HEADER_INDEX; MAX_HEADERS];
impl HeaderIndex {
fn record(
bytes: &[u8],
headers: &[httparse::Header<'_>],
indices: &mut [HeaderIndex],
) {
let bytes_ptr = bytes.as_ptr() as usize;
for (header, indices) in headers.iter().zip(indices.iter_mut()) {
let name_start = header.name.as_ptr() as usize - bytes_ptr;
let name_end = name_start + header.name.len();
indices.name = (name_start, name_end);
let value_start = header.value.as_ptr() as usize - bytes_ptr;
let value_end = value_start + header.value.len();
indices.value = (value_start, value_end);
}
}
}
// test cases taken from:
// https://github.com/seanmonstar/httparse/blob/master/benches/parse.rs
const REQ_SHORT: &'static [u8] = b"\
GET / HTTP/1.0\r\n\
Host: example.com\r\n\
Cookie: session=60; user_id=1\r\n\r\n";
const REQ: &'static [u8] = b"\
GET /wp-content/uploads/2010/03/hello-kitty-darth-vader-pink.jpg HTTP/1.1\r\n\
Host: www.kittyhell.com\r\n\
User-Agent: Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10.6; ja-JP-mac; rv:1.9.2.3) Gecko/20100401 Firefox/3.6.3 Pathtraq/0.9\r\n\
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8\r\n\
Accept-Language: ja,en-us;q=0.7,en;q=0.3\r\n\
Accept-Encoding: gzip,deflate\r\n\
Accept-Charset: Shift_JIS,utf-8;q=0.7,*;q=0.7\r\n\
Keep-Alive: 115\r\n\
Connection: keep-alive\r\n\
Cookie: wp_ozh_wsa_visits=2; wp_ozh_wsa_visit_lasttime=xxxxxxxxxx; __utma=xxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.xxxxxxxxxx.x; __utmz=xxxxxxxxx.xxxxxxxxxx.x.x.utmccn=(referral)|utmcsr=reader.livedoor.com|utmcct=/reader/|utmcmd=referral|padding=under256\r\n\r\n";
mod _new {
use super::*;
pub fn parse_headers(src: &mut BytesMut) -> usize {
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
let mut req = httparse::Request::new(&mut parsed);
match req.parse(src).unwrap() {
httparse::Status::Complete(_len) => {
HeaderIndex::record(src, req.headers, &mut headers);
req.headers.len()
}
_ => unreachable!(),
}
}
}
mod _original {
use super::*;
use std::mem::MaybeUninit;
pub fn parse_headers(src: &mut BytesMut) -> usize {
let mut headers: [HeaderIndex; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] =
unsafe { MaybeUninit::uninit().assume_init() };
let mut req = httparse::Request::new(&mut parsed);
match req.parse(src).unwrap() {
httparse::Status::Complete(_len) => {
HeaderIndex::record(src, req.headers, &mut headers);
req.headers.len()
}
_ => unreachable!(),
}
}
}


@ -1,13 +1,14 @@
use std::{env, io};
use actix_http::{error::PayloadError, HttpService, Request, Response};
use actix_http::{Error, HttpService, Request, Response};
use actix_server::Server;
use bytes::BytesMut;
use futures::{Future, Stream};
use futures_util::StreamExt;
use http::header::HeaderValue;
use log::info;
fn main() -> io::Result<()> {
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "echo=info");
env_logger::init();
@ -16,22 +17,21 @@ fn main() -> io::Result<()> {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
.finish(|mut req: Request| {
req.take_payload()
.fold(BytesMut::new(), move |mut body, chunk| {
body.extend_from_slice(&chunk);
Ok::<_, PayloadError>(body)
})
.and_then(|bytes| {
info!("request body: {:?}", bytes);
let mut res = Response::Ok();
res.header(
"x-head",
HeaderValue::from_static("dummy value!"),
);
Ok(res.body(bytes))
})
.finish(|mut req: Request| async move {
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?);
}
info!("request body: {:?}", body);
Ok::<_, Error>(
Response::Ok()
.header("x-head", HeaderValue::from_static("dummy value!"))
.body(body),
)
})
.tcp()
})?
.run()
.await
}


@ -1,34 +1,33 @@
use std::{env, io};
use actix_http::http::HeaderValue;
use actix_http::{error::PayloadError, Error, HttpService, Request, Response};
use actix_http::{Error, HttpService, Request, Response};
use actix_server::Server;
use bytes::BytesMut;
use futures::{Future, Stream};
use futures_util::StreamExt;
use log::info;
fn handle_request(mut req: Request) -> impl Future<Item = Response, Error = Error> {
req.take_payload()
.fold(BytesMut::new(), move |mut body, chunk| {
body.extend_from_slice(&chunk);
Ok::<_, PayloadError>(body)
})
.from_err()
.and_then(|bytes| {
info!("request body: {:?}", bytes);
let mut res = Response::Ok();
res.header("x-head", HeaderValue::from_static("dummy value!"));
Ok(res.body(bytes))
})
async fn handle_request(mut req: Request) -> Result<Response, Error> {
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?)
}
info!("request body: {:?}", body);
Ok(Response::Ok()
.header("x-head", HeaderValue::from_static("dummy value!"))
.body(body))
}
fn main() -> io::Result<()> {
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "echo=info");
env_logger::init();
Server::build()
.bind("echo", "127.0.0.1:8080", || {
HttpService::build().finish(|_req: Request| handle_request(_req))
HttpService::build().finish(handle_request).tcp()
})?
.run()
.await
}


@ -1,28 +0,0 @@
use std::{env, io};
use actix_codec::Framed;
use actix_http::{h1, Response, SendResponse, ServiceConfig};
use actix_server::{Io, Server};
use actix_service::{fn_service, NewService};
use actix_utils::framed::IntoFramed;
use actix_utils::stream::TakeItem;
use futures::Future;
use tokio_tcp::TcpStream;
fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "framed_hello=info");
env_logger::init();
Server::build()
.bind("framed_hello", "127.0.0.1:8080", || {
fn_service(|io: Io<TcpStream>| Ok(io.into_parts().0))
.and_then(IntoFramed::new(|| h1::Codec::new(ServiceConfig::default())))
.and_then(TakeItem::new().map_err(|_| ()))
.and_then(|(_req, _framed): (_, Framed<_, _>)| {
SendResponse::send(_framed, Response::Ok().body("Hello world!"))
.map_err(|_| ())
.map(|_| ())
})
})?
.run()
}


@ -2,11 +2,12 @@ use std::{env, io};
use actix_http::{HttpService, Response};
use actix_server::Server;
use futures::future;
use futures_util::future;
use http::header::HeaderValue;
use log::info;
fn main() -> io::Result<()> {
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "hello_world=info");
env_logger::init();
@ -21,6 +22,8 @@ fn main() -> io::Result<()> {
res.header("x-head", HeaderValue::from_static("dummy value!"));
future::ok::<_, ()>(res.body("Hello world!"))
})
.tcp()
})?
.run()
.await
}


@ -1,8 +1,12 @@
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, mem};
use bytes::{Bytes, BytesMut};
use futures::{Async, Poll, Stream};
use futures_core::Stream;
use futures_util::ready;
use pin_project::pin_project;
use crate::error::Error;
@ -11,53 +15,60 @@ use crate::error::Error;
pub enum BodySize {
None,
Empty,
Sized(usize),
Sized64(u64),
Sized(u64),
Stream,
}
impl BodySize {
pub fn is_eof(&self) -> bool {
match self {
BodySize::None
| BodySize::Empty
| BodySize::Sized(0)
| BodySize::Sized64(0) => true,
_ => false,
}
matches!(self, BodySize::None | BodySize::Empty | BodySize::Sized(0))
}
}
/// A type that implements this trait can be streamed to a peer.
pub trait MessageBody {
fn length(&self) -> BodySize;
fn size(&self) -> BodySize;
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>>;
downcast_get_type_id!();
}
downcast!(MessageBody);
impl MessageBody for () {
fn length(&self) -> BodySize {
fn size(&self) -> BodySize {
BodySize::Empty
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
Ok(Async::Ready(None))
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
Poll::Ready(None)
}
}
impl<T: MessageBody> MessageBody for Box<T> {
fn length(&self) -> BodySize {
self.as_ref().length()
impl<T: MessageBody + Unpin> MessageBody for Box<T> {
fn size(&self) -> BodySize {
self.as_ref().size()
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
self.as_mut().poll_next()
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
Pin::new(self.get_mut().as_mut()).poll_next(cx)
}
}
#[pin_project(project = ResponseBodyProj)]
pub enum ResponseBody<B> {
Body(B),
Other(Body),
Body(#[pin] B),
Other(#[pin] Body),
}
impl ResponseBody<Body> {
@ -86,30 +97,39 @@ impl<B: MessageBody> ResponseBody<B> {
}
impl<B: MessageBody> MessageBody for ResponseBody<B> {
fn length(&self) -> BodySize {
fn size(&self) -> BodySize {
match self {
ResponseBody::Body(ref body) => body.length(),
ResponseBody::Other(ref body) => body.length(),
ResponseBody::Body(ref body) => body.size(),
ResponseBody::Other(ref body) => body.size(),
}
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
match self {
ResponseBody::Body(ref mut body) => body.poll_next(),
ResponseBody::Other(ref mut body) => body.poll_next(),
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() {
ResponseBodyProj::Body(body) => body.poll_next(cx),
ResponseBodyProj::Other(body) => body.poll_next(cx),
}
}
}
impl<B: MessageBody> Stream for ResponseBody<B> {
type Item = Bytes;
type Error = Error;
type Item = Result<Bytes, Error>;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.poll_next()
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
match self.project() {
ResponseBodyProj::Body(body) => body.poll_next(cx),
ResponseBodyProj::Other(body) => body.poll_next(cx),
}
}
}
#[pin_project(project = BodyProj)]
/// Represents various types of http message body.
pub enum Body {
/// Empty response. `Content-Length` header is not set.
@ -119,44 +139,47 @@ pub enum Body {
/// Specific response body.
Bytes(Bytes),
/// Generic message body.
Message(Box<dyn MessageBody>),
Message(Box<dyn MessageBody + Unpin>),
}
impl Body {
/// Create body from slice (copy)
pub fn from_slice(s: &[u8]) -> Body {
Body::Bytes(Bytes::from(s))
Body::Bytes(Bytes::copy_from_slice(s))
}
/// Create body from generic message body.
pub fn from_message<B: MessageBody + 'static>(body: B) -> Body {
pub fn from_message<B: MessageBody + Unpin + 'static>(body: B) -> Body {
Body::Message(Box::new(body))
}
}
impl MessageBody for Body {
fn length(&self) -> BodySize {
fn size(&self) -> BodySize {
match self {
Body::None => BodySize::None,
Body::Empty => BodySize::Empty,
Body::Bytes(ref bin) => BodySize::Sized(bin.len()),
Body::Message(ref body) => body.length(),
Body::Bytes(ref bin) => BodySize::Sized(bin.len() as u64),
Body::Message(ref body) => body.size(),
}
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
match self {
Body::None => Ok(Async::Ready(None)),
Body::Empty => Ok(Async::Ready(None)),
Body::Bytes(ref mut bin) => {
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() {
BodyProj::None => Poll::Ready(None),
BodyProj::Empty => Poll::Ready(None),
BodyProj::Bytes(ref mut bin) => {
let len = bin.len();
if len == 0 {
Ok(Async::Ready(None))
Poll::Ready(None)
} else {
Ok(Async::Ready(Some(bin.split_to(len))))
Poll::Ready(Some(Ok(mem::take(bin))))
}
}
Body::Message(ref mut body) => body.poll_next(),
BodyProj::Message(ref mut body) => Pin::new(body.as_mut()).poll_next(cx),
}
}
}
@ -164,14 +187,8 @@ impl MessageBody for Body {
impl PartialEq for Body {
fn eq(&self, other: &Body) -> bool {
match *self {
Body::None => match *other {
Body::None => true,
_ => false,
},
Body::Empty => match *other {
Body::Empty => true,
_ => false,
},
Body::None => matches!(*other, Body::None),
Body::Empty => matches!(*other, Body::Empty),
Body::Bytes(ref b) => match *other {
Body::Bytes(ref b2) => b == b2,
_ => false,
@ -182,10 +199,10 @@ impl PartialEq for Body {
}
impl fmt::Debug for Body {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
Body::None => write!(f, "Body::None"),
Body::Empty => write!(f, "Body::Zero"),
Body::Empty => write!(f, "Body::Empty"),
Body::Bytes(ref b) => write!(f, "Body::Bytes({:?})", b),
Body::Message(_) => write!(f, "Body::Message(_)"),
}
@ -218,7 +235,7 @@ impl From<String> for Body {
impl<'a> From<&'a String> for Body {
fn from(s: &'a String) -> Body {
Body::Bytes(Bytes::from(AsRef::<[u8]>::as_ref(&s)))
Body::Bytes(Bytes::copy_from_slice(AsRef::<[u8]>::as_ref(&s)))
}
}
@ -234,96 +251,115 @@ impl From<BytesMut> for Body {
}
}
impl From<serde_json::Value> for Body {
fn from(v: serde_json::Value) -> Body {
Body::Bytes(v.to_string().into())
}
}
impl<S> From<SizedStream<S>> for Body
where
S: Stream<Item = Result<Bytes, Error>> + Unpin + 'static,
{
fn from(s: SizedStream<S>) -> Body {
Body::from_message(s)
}
}
impl<S, E> From<BodyStream<S, E>> for Body
where
S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
E: Into<Error> + 'static,
{
fn from(s: BodyStream<S, E>) -> Body {
Body::from_message(s)
}
}
impl MessageBody for Bytes {
fn length(&self) -> BodySize {
BodySize::Sized(self.len())
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Ok(Async::Ready(None))
Poll::Ready(None)
} else {
Ok(Async::Ready(Some(mem::replace(self, Bytes::new()))))
Poll::Ready(Some(Ok(mem::take(self.get_mut()))))
}
}
}
impl MessageBody for BytesMut {
fn length(&self) -> BodySize {
BodySize::Sized(self.len())
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Ok(Async::Ready(None))
Poll::Ready(None)
} else {
Ok(Async::Ready(Some(
mem::replace(self, BytesMut::new()).freeze(),
)))
Poll::Ready(Some(Ok(mem::take(self.get_mut()).freeze())))
}
}
}
impl MessageBody for &'static str {
fn length(&self) -> BodySize {
BodySize::Sized(self.len())
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Ok(Async::Ready(None))
Poll::Ready(None)
} else {
Ok(Async::Ready(Some(Bytes::from_static(
mem::replace(self, "").as_ref(),
Poll::Ready(Some(Ok(Bytes::from_static(
mem::take(self.get_mut()).as_ref(),
))))
}
}
}
impl MessageBody for &'static [u8] {
fn length(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
if self.is_empty() {
Ok(Async::Ready(None))
} else {
Ok(Async::Ready(Some(Bytes::from_static(mem::replace(
self, b"",
)))))
}
}
}
impl MessageBody for Vec<u8> {
fn length(&self) -> BodySize {
BodySize::Sized(self.len())
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Ok(Async::Ready(None))
Poll::Ready(None)
} else {
Ok(Async::Ready(Some(Bytes::from(mem::replace(
self,
Vec::new(),
)))))
Poll::Ready(Some(Ok(Bytes::from(mem::take(self.get_mut())))))
}
}
}
impl MessageBody for String {
fn length(&self) -> BodySize {
BodySize::Sized(self.len())
fn size(&self) -> BodySize {
BodySize::Sized(self.len() as u64)
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Ok(Async::Ready(None))
Poll::Ready(None)
} else {
Ok(Async::Ready(Some(Bytes::from(
mem::replace(self, String::new()).into_bytes(),
Poll::Ready(Some(Ok(Bytes::from(
mem::take(self.get_mut()).into_bytes(),
))))
}
}
@ -331,14 +367,16 @@ impl MessageBody for String {
/// Type representing a streaming body.
/// The response does not contain a `content-length` header and the appropriate transfer encoding is used.
pub struct BodyStream<S, E> {
#[pin_project]
pub struct BodyStream<S: Unpin, E> {
#[pin]
stream: S,
_t: PhantomData<E>,
}
impl<S, E> BodyStream<S, E>
where
S: Stream<Item = Bytes, Error = E>,
S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>,
{
pub fn new(stream: S) -> Self {
@ -351,50 +389,85 @@ where
impl<S, E> MessageBody for BodyStream<S, E>
where
S: Stream<Item = Bytes, Error = E>,
S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>,
{
fn length(&self) -> BodySize {
fn size(&self) -> BodySize {
BodySize::Stream
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
self.stream.poll().map_err(|e| e.into())
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped to prevent [`BodyStream`]'s transmission being
/// ended on a zero-length chunk, but rather proceed until the underlying
/// [`Stream`] ends.
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream = self.project().stream;
loop {
let stream = stream.as_mut();
return Poll::Ready(match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
opt => opt.map(|res| res.map_err(Into::into)),
});
}
}
}
/// Type representing a sized streaming body. This body implementation should be used
/// if the total size of the stream is known. Data is sent as-is without using transfer encoding.
pub struct SizedStream<S> {
size: usize,
#[pin_project]
pub struct SizedStream<S: Unpin> {
size: u64,
#[pin]
stream: S,
}
impl<S> SizedStream<S>
where
S: Stream<Item = Bytes, Error = Error>,
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
pub fn new(size: usize, stream: S) -> Self {
pub fn new(size: u64, stream: S) -> Self {
SizedStream { size, stream }
}
}
impl<S> MessageBody for SizedStream<S>
where
S: Stream<Item = Bytes, Error = Error>,
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
fn length(&self) -> BodySize {
BodySize::Sized(self.size)
fn size(&self) -> BodySize {
BodySize::Sized(self.size as u64)
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
self.stream.poll()
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped to prevent [`SizedStream`]'s transmission being
/// ended on a zero-length chunk, but rather proceed until the underlying
/// [`Stream`] ends.
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream: Pin<&mut S> = self.project().stream;
loop {
let stream = stream.as_mut();
return Poll::Ready(match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
val => val,
});
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures_util::future::poll_fn;
use futures_util::pin_mut;
use futures_util::stream;
impl Body {
pub(crate) fn get_ref(&self) -> &[u8] {
@ -414,49 +487,237 @@ mod tests {
}
}
#[test]
fn test_static_str() {
assert_eq!(Body::from("").length(), BodySize::Sized(0));
assert_eq!(Body::from("test").length(), BodySize::Sized(4));
#[actix_rt::test]
async fn test_static_str() {
assert_eq!(Body::from("").size(), BodySize::Sized(0));
assert_eq!(Body::from("test").size(), BodySize::Sized(4));
assert_eq!(Body::from("test").get_ref(), b"test");
assert_eq!("test".size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| Pin::new(&mut "test").poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
#[test]
fn test_static_bytes() {
assert_eq!(Body::from(b"test".as_ref()).length(), BodySize::Sized(4));
#[actix_rt::test]
async fn test_static_bytes() {
assert_eq!(Body::from(b"test".as_ref()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b"test".as_ref()).get_ref(), b"test");
assert_eq!(
Body::from_slice(b"test".as_ref()).length(),
Body::from_slice(b"test".as_ref()).size(),
BodySize::Sized(4)
);
assert_eq!(Body::from_slice(b"test".as_ref()).get_ref(), b"test");
let sb = Bytes::from(&b"test"[..]);
pin_mut!(sb);
assert_eq!(sb.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| sb.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[test]
fn test_vec() {
assert_eq!(Body::from(Vec::from("test")).length(), BodySize::Sized(4));
#[actix_rt::test]
async fn test_vec() {
assert_eq!(Body::from(Vec::from("test")).size(), BodySize::Sized(4));
assert_eq!(Body::from(Vec::from("test")).get_ref(), b"test");
let test_vec = Vec::from("test");
pin_mut!(test_vec);
assert_eq!(test_vec.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| test_vec.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
#[test]
fn test_bytes() {
assert_eq!(Body::from(Bytes::from("test")).length(), BodySize::Sized(4));
assert_eq!(Body::from(Bytes::from("test")).get_ref(), b"test");
}
#[test]
fn test_string() {
let b = "test".to_owned();
assert_eq!(Body::from(b.clone()).length(), BodySize::Sized(4));
#[actix_rt::test]
async fn test_bytes() {
let b = Bytes::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
assert_eq!(Body::from(&b).length(), BodySize::Sized(4));
assert_eq!(Body::from(&b).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[test]
fn test_bytes_mut() {
#[actix_rt::test]
async fn test_bytes_mut() {
let b = BytesMut::from("test");
assert_eq!(Body::from(b.clone()).length(), BodySize::Sized(4));
assert_eq!(Body::from(b).get_ref(), b"test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_string() {
let b = "test".to_owned();
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
assert_eq!(Body::from(&b).size(), BodySize::Sized(4));
assert_eq!(Body::from(&b).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_unit() {
assert_eq!(().size(), BodySize::Empty);
assert!(poll_fn(|cx| Pin::new(&mut ()).poll_next(cx))
.await
.is_none());
}
#[actix_rt::test]
async fn test_box() {
let val = Box::new(());
pin_mut!(val);
assert_eq!(val.size(), BodySize::Empty);
assert!(poll_fn(|cx| val.as_mut().poll_next(cx)).await.is_none());
}
#[actix_rt::test]
async fn test_body_eq() {
assert!(
Body::Bytes(Bytes::from_static(b"1"))
== Body::Bytes(Bytes::from_static(b"1"))
);
assert!(Body::Bytes(Bytes::from_static(b"1")) != Body::None);
}
#[actix_rt::test]
async fn test_body_debug() {
assert!(format!("{:?}", Body::None).contains("Body::None"));
assert!(format!("{:?}", Body::Empty).contains("Body::Empty"));
assert!(format!("{:?}", Body::Bytes(Bytes::from_static(b"1"))).contains('1'));
}
#[actix_rt::test]
async fn test_serde_json() {
use serde_json::json;
assert_eq!(
Body::from(serde_json::Value::String("test".into())).size(),
BodySize::Sized(6)
);
assert_eq!(
Body::from(json!({"test-key":"test-value"})).size(),
BodySize::Sized(25)
);
}
mod body_stream {
use super::*;
//use futures::task::noop_waker;
//use futures::stream::once;
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok(Bytes::from(v)) as Result<Bytes, ()>),
));
pin_mut!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
/* Now it does not compile as it should
#[actix_rt::test]
async fn move_pinned_pointer() {
let (sender, receiver) = futures::channel::oneshot::channel();
let mut body_stream = Ok(BodyStream::new(once(async {
let x = Box::new(0i32);
let y = &x;
receiver.await.unwrap();
let _z = **y;
Ok::<_, ()>(Bytes::new())
})));
let waker = noop_waker();
let mut context = Context::from_waker(&waker);
pin_mut!(body_stream);
let _ = body_stream.as_mut().unwrap().poll_next(&mut context);
sender.send(()).unwrap();
let _ = std::mem::replace(&mut body_stream, Err([0; 32])).unwrap().poll_next(&mut context);
}*/
}
mod sized_stream {
use super::*;
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = SizedStream::new(
2,
stream::iter(["1", "", "2"].iter().map(|&v| Ok(Bytes::from(v)))),
);
pin_mut!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
}
#[actix_rt::test]
async fn test_body_casting() {
let mut body = String::from("hello cast");
let resp_body: &mut dyn MessageBody = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push('!');
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
}


@ -1,53 +1,94 @@
use std::fmt::Debug;
use std::marker::PhantomData;
use std::rc::Rc;
use std::{fmt, net};
use actix_server_config::ServerConfig as SrvConfig;
use actix_service::{IntoNewService, NewService};
use actix_codec::Framed;
use actix_service::{IntoServiceFactory, Service, ServiceFactory};
use crate::body::MessageBody;
use crate::config::{KeepAlive, ServiceConfig};
use crate::error::Error;
use crate::h1::{Codec, ExpectHandler, H1Service, UpgradeHandler};
use crate::h2::H2Service;
use crate::helpers::{Data, DataFactory};
use crate::request::Request;
use crate::response::Response;
use crate::h1::H1Service;
use crate::h2::H2Service;
use crate::service::HttpService;
/// An HTTP service builder.
///
/// This type can be used to construct an instance of an HTTP service through a
/// builder-like pattern.
pub struct HttpServiceBuilder<T, S> {
pub struct HttpServiceBuilder<T, S, X = ExpectHandler, U = UpgradeHandler<T>> {
keep_alive: KeepAlive,
client_timeout: u64,
client_disconnect: u64,
secure: bool,
local_addr: Option<net::SocketAddr>,
expect: X,
upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
_t: PhantomData<(T, S)>,
}
impl<T, S> HttpServiceBuilder<T, S>
impl<T, S> HttpServiceBuilder<T, S, ExpectHandler, UpgradeHandler<T>>
where
S: NewService<SrvConfig, Request = Request>,
S::Error: Debug + 'static,
S::Service: 'static,
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
<S::Service as Service>::Future: 'static,
{
/// Create instance of `HttpServiceBuilder`
pub fn new() -> HttpServiceBuilder<T, S> {
pub fn new() -> Self {
HttpServiceBuilder {
keep_alive: KeepAlive::Timeout(5),
client_timeout: 5000,
client_disconnect: 0,
secure: false,
local_addr: None,
expect: ExpectHandler,
upgrade: None,
on_connect: None,
_t: PhantomData,
}
}
}
impl<T, S, X, U> HttpServiceBuilder<T, S, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
<S::Service as Service>::Future: 'static,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
<X::Service as Service>::Future: 'static,
U: ServiceFactory<Config = (), Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
<U::Service as Service>::Future: 'static,
{
/// Set server keep-alive setting.
///
/// By default keep-alive is set to 5 seconds.
pub fn keep_alive<U: Into<KeepAlive>>(mut self, val: U) -> Self {
pub fn keep_alive<W: Into<KeepAlive>>(mut self, val: W) -> Self {
self.keep_alive = val.into();
self
}
/// Set connection secure state
pub fn secure(mut self) -> Self {
self.secure = true;
self
}
/// Set the local address that this service is bound to.
pub fn local_addr(mut self, addr: net::SocketAddr) -> Self {
self.local_addr = Some(addr);
self
}
/// Set server client timeout in milliseconds for first request.
///
/// Defines a timeout for reading client request header. If a client does not transmit
@ -75,67 +116,136 @@ where
self
}
// #[cfg(feature = "ssl")]
// /// Configure alpn protocols for SslAcceptorBuilder.
// pub fn configure_openssl(
// builder: &mut openssl::ssl::SslAcceptorBuilder,
// ) -> io::Result<()> {
// let protos: &[u8] = b"\x02h2";
// builder.set_alpn_select_callback(|_, protos| {
// const H2: &[u8] = b"\x02h2";
// if protos.windows(3).any(|window| window == H2) {
// Ok(b"h2")
// } else {
// Err(openssl::ssl::AlpnError::NOACK)
// }
// });
// builder.set_alpn_protos(&protos)?;
/// Provide service for `EXPECT: 100-Continue` support.
///
/// The service is called with requests that contain an `EXPECT` header.
/// On success it must return the request, which is then forwarded to the
/// main service.
pub fn expect<F, X1>(self, expect: F) -> HttpServiceBuilder<T, S, X1, U>
where
F: IntoServiceFactory<X1>,
X1: ServiceFactory<Config = (), Request = Request, Response = Request>,
X1::Error: Into<Error>,
X1::InitError: fmt::Debug,
<X1::Service as Service>::Future: 'static,
{
HttpServiceBuilder {
keep_alive: self.keep_alive,
client_timeout: self.client_timeout,
client_disconnect: self.client_disconnect,
secure: self.secure,
local_addr: self.local_addr,
expect: expect.into_factory(),
upgrade: self.upgrade,
on_connect: self.on_connect,
_t: PhantomData,
}
}
// Ok(())
// }
/// Provide service for custom `Connection: UPGRADE` support.
///
/// If a service is provided, normal request handling is halted and this
/// service is called with the original request and the framed object.
pub fn upgrade<F, U1>(self, upgrade: F) -> HttpServiceBuilder<T, S, X, U1>
where
F: IntoServiceFactory<U1>,
U1: ServiceFactory<
Config = (),
Request = (Request, Framed<T, Codec>),
Response = (),
>,
U1::Error: fmt::Display,
U1::InitError: fmt::Debug,
<U1::Service as Service>::Future: 'static,
{
HttpServiceBuilder {
keep_alive: self.keep_alive,
client_timeout: self.client_timeout,
client_disconnect: self.client_disconnect,
secure: self.secure,
local_addr: self.local_addr,
expect: self.expect,
upgrade: Some(upgrade.into_factory()),
on_connect: self.on_connect,
_t: PhantomData,
}
}
/// Set on-connect callback.
///
/// It is called once per connection and the result of the call is stored
/// in the request's extensions.
pub fn on_connect<F, I>(mut self, f: F) -> Self
where
F: Fn(&T) -> I + 'static,
I: Clone + 'static,
{
self.on_connect = Some(Rc::new(move |io| Box::new(Data(f(io)))));
self
}
/// Finish service configuration and create *http service* for HTTP/1 protocol.
pub fn h1<F, P, B>(self, service: F) -> H1Service<T, P, S, B>
pub fn h1<F, B>(self, service: F) -> H1Service<T, S, B, X, U>
where
B: MessageBody + 'static,
F: IntoNewService<S, SrvConfig>,
B: MessageBody,
F: IntoServiceFactory<S>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_timeout,
self.client_disconnect,
self.secure,
self.local_addr,
);
H1Service::with_config(cfg, service.into_new_service())
H1Service::with_config(cfg, service.into_factory())
.expect(self.expect)
.upgrade(self.upgrade)
.on_connect(self.on_connect)
}
/// Finish service configuration and create *http service* for HTTP/2 protocol.
pub fn h2<F, P, B>(self, service: F) -> H2Service<T, P, S, B>
pub fn h2<F, B>(self, service: F) -> H2Service<T, S, B>
where
B: MessageBody + 'static,
F: IntoNewService<S, SrvConfig>,
S::Response: Into<Response<B>>,
F: IntoServiceFactory<S>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_timeout,
self.client_disconnect,
self.secure,
self.local_addr,
);
H2Service::with_config(cfg, service.into_new_service())
H2Service::with_config(cfg, service.into_factory()).on_connect(self.on_connect)
}
/// Finish service configuration and create `HttpService` instance.
pub fn finish<F, P, B>(self, service: F) -> HttpService<T, P, S, B>
pub fn finish<F, B>(self, service: F) -> HttpService<T, S, B, X, U>
where
B: MessageBody + 'static,
F: IntoNewService<S, SrvConfig>,
S::Response: Into<Response<B>>,
F: IntoServiceFactory<S>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
{
let cfg = ServiceConfig::new(
self.keep_alive,
self.client_timeout,
self.client_disconnect,
self.secure,
self.local_addr,
);
HttpService::with_config(cfg, service.into_new_service())
HttpService::with_config(cfg, service.into_factory())
.expect(self.expect)
.upgrade(self.upgrade)
.on_connect(self.on_connect)
}
}
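For orientation, a rough end-to-end sketch of driving this builder. The server wiring (`actix-server`, `actix-rt`), the `HttpService::build()` entry point, and the `tcp()` finaliser follow the published actix-http examples and are assumptions here rather than part of this diff; the builder methods themselves are the ones defined above.

```rust
use actix_http::{Error, HttpService, KeepAlive, Request, Response};
use actix_server::Server;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    Server::build()
        .bind("hello", "127.0.0.1:8080", || {
            HttpService::build()
                .keep_alive(KeepAlive::Timeout(30))
                .client_timeout(1_000)    // ms allowed for reading the request head
                .client_disconnect(3_000) // ms allowed for a graceful shutdown
                .finish(|_req: Request| async move {
                    Ok::<_, Error>(Response::Ok().body("hello"))
                })
                .tcp()
        })?
        .run()
        .await
}
```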


@ -0,0 +1,39 @@
use std::time::Duration;
// These values are taken from hyper/src/proto/h2/client.rs
const DEFAULT_H2_CONN_WINDOW: u32 = 1024 * 1024 * 2; // 2mb
const DEFAULT_H2_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb
/// Connector configuration
#[derive(Clone)]
pub(crate) struct ConnectorConfig {
pub(crate) timeout: Duration,
pub(crate) conn_lifetime: Duration,
pub(crate) conn_keep_alive: Duration,
pub(crate) disconnect_timeout: Option<Duration>,
pub(crate) limit: usize,
pub(crate) conn_window_size: u32,
pub(crate) stream_window_size: u32,
}
impl Default for ConnectorConfig {
fn default() -> Self {
Self {
timeout: Duration::from_secs(1),
conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15),
disconnect_timeout: Some(Duration::from_millis(3000)),
limit: 100,
conn_window_size: DEFAULT_H2_CONN_WINDOW,
stream_window_size: DEFAULT_H2_STREAM_WINDOW,
}
}
}
impl ConnectorConfig {
pub(crate) fn no_disconnect_timeout(&self) -> Self {
let mut res = self.clone();
res.disconnect_timeout = None;
res
}
}
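Since the struct and its fields are `pub(crate)`, this configuration is only manipulated inside the client module; a minimal in-crate sketch (the struct-update construction and the chosen limit are illustrative, not taken from this diff) of how it composes with `no_disconnect_timeout`, which the connector uses for the plain-TCP pool:

```rust
// Assumed to live inside actix-http's client module, next to ConnectorConfig.
fn tcp_pool_config() -> ConnectorConfig {
    // Start from the defaults, override one knob, then drop the disconnect
    // timeout, as the plain-TCP pool does when the connector is finished.
    let cfg = ConnectorConfig {
        limit: 25,
        ..ConnectorConfig::default()
    };
    cfg.no_disconnect_timeout()
}
```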


@ -1,18 +1,21 @@
use std::{fmt, io, time};
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io, mem, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use bytes::{Buf, Bytes};
use futures::future::{err, Either, Future, FutureResult};
use futures::Poll;
use futures_util::future::{err, Either, FutureExt, LocalBoxFuture, Ready};
use h2::client::SendRequest;
use pin_project::pin_project;
use crate::body::MessageBody;
use crate::h1::ClientCodec;
use crate::message::{RequestHead, ResponseHead};
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::Payload;
use super::error::SendRequestError;
use super::pool::Acquired;
use super::pool::{Acquired, Protocol};
use super::{h1proto, h2proto};
pub(crate) enum ConnectionType<Io> {
@ -21,31 +24,32 @@ pub(crate) enum ConnectionType<Io> {
}
pub trait Connection {
type Io: AsyncRead + AsyncWrite;
type Future: Future<Item = (ResponseHead, Payload), Error = SendRequestError>;
type Io: AsyncRead + AsyncWrite + Unpin;
type Future: Future<Output = Result<(ResponseHead, Payload), SendRequestError>>;
fn protocol(&self) -> Protocol;
/// Send request and body
fn send_request<B: MessageBody + 'static>(
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
self,
head: RequestHead,
head: H,
body: B,
) -> Self::Future;
type TunnelFuture: Future<
Item = (ResponseHead, Framed<Self::Io, ClientCodec>),
Error = SendRequestError,
Output = Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>;
/// Send request, returns Response and Framed
fn open_tunnel(self, head: RequestHead) -> Self::TunnelFuture;
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture;
}
pub(crate) trait ConnectionLifetime: AsyncRead + AsyncWrite + 'static {
/// Close connection
fn close(&mut self);
fn close(self: Pin<&mut Self>);
/// Release connection to the connection pool
fn release(&mut self);
fn release(self: Pin<&mut Self>);
}
#[doc(hidden)]
@ -60,7 +64,7 @@ impl<T> fmt::Debug for IoConnection<T>
where
T: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.io {
Some(ConnectionType::H1(ref io)) => write!(f, "H1Connection({:?})", io),
Some(ConnectionType::H2(_)) => write!(f, "H2Connection"),
@ -69,7 +73,7 @@ where
}
}
impl<T: AsyncRead + AsyncWrite + 'static> IoConnection<T> {
impl<T: AsyncRead + AsyncWrite + Unpin> IoConnection<T> {
pub(crate) fn new(
io: ConnectionType<T>,
created: time::Instant,
@ -89,49 +93,50 @@ impl<T: AsyncRead + AsyncWrite + 'static> IoConnection<T> {
impl<T> Connection for IoConnection<T>
where
T: AsyncRead + AsyncWrite + 'static,
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Io = T;
type Future = Box<Future<Item = (ResponseHead, Payload), Error = SendRequestError>>;
type Future =
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
fn send_request<B: MessageBody + 'static>(
fn protocol(&self) -> Protocol {
match self.io {
Some(ConnectionType::H1(_)) => Protocol::Http1,
Some(ConnectionType::H2(_)) => Protocol::Http2,
None => Protocol::Http1,
}
}
fn send_request<B: MessageBody + 'static, H: Into<RequestHeadType>>(
mut self,
head: RequestHead,
head: H,
body: B,
) -> Self::Future {
match self.io.take().unwrap() {
ConnectionType::H1(io) => Box::new(h1proto::send_request(
io,
head,
body,
self.created,
self.pool,
)),
ConnectionType::H2(io) => Box::new(h2proto::send_request(
io,
head,
body,
self.created,
self.pool,
)),
ConnectionType::H1(io) => {
h1proto::send_request(io, head.into(), body, self.created, self.pool)
.boxed_local()
}
ConnectionType::H2(io) => {
h2proto::send_request(io, head.into(), body, self.created, self.pool)
.boxed_local()
}
}
}
type TunnelFuture = Either<
Box<
Future<
Item = (ResponseHead, Framed<Self::Io, ClientCodec>),
Error = SendRequestError,
>,
LocalBoxFuture<
'static,
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>,
FutureResult<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
Ready<Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>>,
>;
/// Send request, returns Response and Framed
fn open_tunnel(mut self, head: RequestHead) -> Self::TunnelFuture {
fn open_tunnel<H: Into<RequestHeadType>>(mut self, head: H) -> Self::TunnelFuture {
match self.io.take().unwrap() {
ConnectionType::H1(io) => {
Either::A(Box::new(h1proto::open_tunnel(io, head)))
Either::Left(h1proto::open_tunnel(io, head.into()).boxed_local())
}
ConnectionType::H2(io) => {
if let Some(mut pool) = self.pool.take() {
@ -141,7 +146,7 @@ where
None,
));
}
Either::B(err(SendRequestError::TunnelNotSupported))
Either::Right(err(SendRequestError::TunnelNotSupported))
}
}
}
@ -155,15 +160,23 @@ pub(crate) enum EitherConnection<A, B> {
impl<A, B> Connection for EitherConnection<A, B>
where
A: AsyncRead + AsyncWrite + 'static,
B: AsyncRead + AsyncWrite + 'static,
A: AsyncRead + AsyncWrite + Unpin + 'static,
B: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Io = EitherIo<A, B>;
type Future = Box<Future<Item = (ResponseHead, Payload), Error = SendRequestError>>;
type Future =
LocalBoxFuture<'static, Result<(ResponseHead, Payload), SendRequestError>>;
fn send_request<RB: MessageBody + 'static>(
fn protocol(&self) -> Protocol {
match self {
EitherConnection::A(con) => con.protocol(),
EitherConnection::B(con) => con.protocol(),
}
}
fn send_request<RB: MessageBody + 'static, H: Into<RequestHeadType>>(
self,
head: RequestHead,
head: H,
body: RB,
) -> Self::Future {
match self {
@ -172,44 +185,34 @@ where
}
}
type TunnelFuture = Box<
Future<
Item = (ResponseHead, Framed<Self::Io, ClientCodec>),
Error = SendRequestError,
>,
type TunnelFuture = LocalBoxFuture<
'static,
Result<(ResponseHead, Framed<Self::Io, ClientCodec>), SendRequestError>,
>;
/// Send request, returns Response and Framed
fn open_tunnel(self, head: RequestHead) -> Self::TunnelFuture {
fn open_tunnel<H: Into<RequestHeadType>>(self, head: H) -> Self::TunnelFuture {
match self {
EitherConnection::A(con) => Box::new(
con.open_tunnel(head)
.map(|(head, framed)| (head, framed.map_io(|io| EitherIo::A(io)))),
),
EitherConnection::B(con) => Box::new(
con.open_tunnel(head)
.map(|(head, framed)| (head, framed.map_io(|io| EitherIo::B(io)))),
),
EitherConnection::A(con) => con
.open_tunnel(head)
.map(|res| {
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::A)))
})
.boxed_local(),
EitherConnection::B(con) => con
.open_tunnel(head)
.map(|res| {
res.map(|(head, framed)| (head, framed.into_map_io(EitherIo::B)))
})
.boxed_local(),
}
}
}
#[pin_project(project = EitherIoProj)]
pub enum EitherIo<A, B> {
A(A),
B(B),
}
impl<A, B> io::Read for EitherIo<A, B>
where
A: io::Read,
B: io::Read,
{
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
match self {
EitherIo::A(ref mut val) => val.read(buf),
EitherIo::B(ref mut val) => val.read(buf),
}
}
A(#[pin] A),
B(#[pin] B),
}
impl<A, B> AsyncRead for EitherIo<A, B>
@ -217,7 +220,21 @@ where
A: AsyncRead,
B: AsyncRead,
{
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
fn poll_read(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
match self.project() {
EitherIoProj::A(val) => val.poll_read(cx, buf),
EitherIoProj::B(val) => val.poll_read(cx, buf),
}
}
unsafe fn prepare_uninitialized_buffer(
&self,
buf: &mut [mem::MaybeUninit<u8>],
) -> bool {
match self {
EitherIo::A(ref val) => val.prepare_uninitialized_buffer(buf),
EitherIo::B(ref val) => val.prepare_uninitialized_buffer(buf),
@ -225,45 +242,50 @@ where
}
}
impl<A, B> io::Write for EitherIo<A, B>
where
A: io::Write,
B: io::Write,
{
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
match self {
EitherIo::A(ref mut val) => val.write(buf),
EitherIo::B(ref mut val) => val.write(buf),
}
}
fn flush(&mut self) -> io::Result<()> {
match self {
EitherIo::A(ref mut val) => val.flush(),
EitherIo::B(ref mut val) => val.flush(),
}
}
}
impl<A, B> AsyncWrite for EitherIo<A, B>
where
A: AsyncWrite,
B: AsyncWrite,
{
fn shutdown(&mut self) -> Poll<(), io::Error> {
match self {
EitherIo::A(ref mut val) => val.shutdown(),
EitherIo::B(ref mut val) => val.shutdown(),
fn poll_write(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
match self.project() {
EitherIoProj::A(val) => val.poll_write(cx, buf),
EitherIoProj::B(val) => val.poll_write(cx, buf),
}
}
fn write_buf<U: Buf>(&mut self, buf: &mut U) -> Poll<usize, io::Error>
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
match self.project() {
EitherIoProj::A(val) => val.poll_flush(cx),
EitherIoProj::B(val) => val.poll_flush(cx),
}
}
fn poll_shutdown(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
match self.project() {
EitherIoProj::A(val) => val.poll_shutdown(cx),
EitherIoProj::B(val) => val.poll_shutdown(cx),
}
}
fn poll_write_buf<U: Buf>(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut U,
) -> Poll<Result<usize, io::Error>>
where
Self: Sized,
{
match self {
EitherIo::A(ref mut val) => val.write_buf(buf),
EitherIo::B(ref mut val) => val.write_buf(buf),
match self.project() {
EitherIoProj::A(val) => val.poll_write_buf(cx, buf),
EitherIoProj::B(val) => val.poll_write_buf(cx, buf),
}
}
}


@ -6,36 +6,60 @@ use actix_codec::{AsyncRead, AsyncWrite};
use actix_connect::{
default_connector, Connect as TcpConnect, Connection as TcpConnection,
};
use actix_service::{apply_fn, Service, ServiceExt};
use actix_rt::net::TcpStream;
use actix_service::{apply_fn, Service};
use actix_utils::timeout::{TimeoutError, TimeoutService};
use http::Uri;
use tokio_tcp::TcpStream;
use super::config::ConnectorConfig;
use super::connection::Connection;
use super::error::ConnectError;
use super::pool::{ConnectionPool, Protocol};
use super::Connect;
#[cfg(feature = "ssl")]
use openssl::ssl::SslConnector;
#[cfg(feature = "openssl")]
use actix_connect::ssl::openssl::SslConnector as OpensslConnector;
#[cfg(not(feature = "ssl"))]
#[cfg(feature = "rustls")]
use actix_connect::ssl::rustls::ClientConfig;
#[cfg(feature = "rustls")]
use std::sync::Arc;
#[cfg(any(feature = "openssl", feature = "rustls"))]
enum SslConnector {
#[cfg(feature = "openssl")]
Openssl(OpensslConnector),
#[cfg(feature = "rustls")]
Rustls(Arc<ClientConfig>),
}
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
type SslConnector = ();
/// HTTP client connector builder instance.
/// The `Connector` type uses a builder-like pattern for connector service construction.
/// Manages HTTP client network connectivity.
/// The `Connector` type uses a builder-like combinator pattern for service
/// construction that finishes by calling the `.finish()` method.
///
/// ```rust,ignore
/// use std::time::Duration;
/// use actix_http::client::Connector;
///
/// let connector = Connector::new()
/// .timeout(Duration::from_secs(5))
/// .finish();
/// ```
pub struct Connector<T, U> {
connector: T,
timeout: Duration,
conn_lifetime: Duration,
conn_keep_alive: Duration,
disconnect_timeout: Duration,
limit: usize,
config: ConnectorConfig,
#[allow(dead_code)]
ssl: SslConnector,
_t: PhantomData<U>,
}
trait Io: AsyncRead + AsyncWrite + Unpin {}
impl<T: AsyncRead + AsyncWrite + Unpin> Io for T {}
impl Connector<(), ()> {
#[allow(clippy::new_ret_no_self, clippy::let_unit_value)]
pub fn new() -> Connector<
impl Service<
Request = TcpConnect<Uri>,
@ -44,40 +68,54 @@ impl Connector<(), ()> {
> + Clone,
TcpStream,
> {
let ssl = {
#[cfg(feature = "ssl")]
{
use log::error;
use openssl::ssl::{SslConnector, SslMethod};
let mut ssl = SslConnector::builder(SslMethod::tls()).unwrap();
let _ = ssl
.set_alpn_protos(b"\x02h2\x08http/1.1")
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
ssl.build()
}
#[cfg(not(feature = "ssl"))]
{}
};
Connector {
ssl,
ssl: Self::build_ssl(vec![b"h2".to_vec(), b"http/1.1".to_vec()]),
connector: default_connector(),
timeout: Duration::from_secs(1),
conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15),
disconnect_timeout: Duration::from_millis(3000),
limit: 100,
config: ConnectorConfig::default(),
_t: PhantomData,
}
}
// Build Ssl connector with openssl, based on supplied alpn protocols
#[cfg(feature = "openssl")]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
use actix_connect::ssl::openssl::SslMethod;
use bytes::{BufMut, BytesMut};
let mut alpn = BytesMut::with_capacity(20);
for proto in protocols.iter() {
alpn.put_u8(proto.len() as u8);
alpn.put(proto.as_slice());
}
let mut ssl = OpensslConnector::builder(SslMethod::tls()).unwrap();
let _ = ssl
.set_alpn_protos(&alpn)
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
SslConnector::Openssl(ssl.build())
}
// Build Ssl connector with rustls, based on supplied alpn protocols
#[cfg(all(not(feature = "openssl"), feature = "rustls"))]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
let mut config = ClientConfig::new();
config.set_protocols(&protocols);
config
.root_store
.add_server_trust_anchors(&actix_tls::rustls::TLS_SERVER_ROOTS);
SslConnector::Rustls(Arc::new(config))
}
// ssl turned off, provides empty ssl connector
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
fn build_ssl(_: Vec<Vec<u8>>) -> SslConnector {}
}
impl<T, U> Connector<T, U> {
/// Use custom connector.
pub fn connector<T1, U1>(self, connector: T1) -> Connector<T1, U1>
where
U1: AsyncRead + AsyncWrite + fmt::Debug,
U1: AsyncRead + AsyncWrite + Unpin + fmt::Debug,
T1: Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, U1>,
@ -86,11 +124,7 @@ impl<T, U> Connector<T, U> {
{
Connector {
connector,
timeout: self.timeout,
conn_lifetime: self.conn_lifetime,
conn_keep_alive: self.conn_keep_alive,
disconnect_timeout: self.disconnect_timeout,
limit: self.limit,
config: self.config,
ssl: self.ssl,
_t: PhantomData,
}
@ -99,24 +133,63 @@ impl<T, U> Connector<T, U> {
impl<T, U> Connector<T, U>
where
U: AsyncRead + AsyncWrite + fmt::Debug + 'static,
U: AsyncRead + AsyncWrite + Unpin + fmt::Debug + 'static,
T: Service<
Request = TcpConnect<Uri>,
Response = TcpConnection<Uri, U>,
Error = actix_connect::ConnectError,
> + Clone,
> + Clone
+ 'static,
{
/// Connection timeout, i.e. max time to connect to remote host including dns name resolution.
/// Set to 1 second by default.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.timeout = timeout;
self.config.timeout = timeout;
self
}
#[cfg(feature = "ssl")]
#[cfg(feature = "openssl")]
/// Use custom `SslConnector` instance.
pub fn ssl(mut self, connector: SslConnector) -> Self {
self.ssl = connector;
pub fn ssl(mut self, connector: OpensslConnector) -> Self {
self.ssl = SslConnector::Openssl(connector);
self
}
#[cfg(feature = "rustls")]
pub fn rustls(mut self, connector: Arc<ClientConfig>) -> Self {
self.ssl = SslConnector::Rustls(connector);
self
}
/// Maximum supported HTTP major version.
///
/// Supported versions are HTTP/1.1 and HTTP/2.
pub fn max_http_version(mut self, val: http::Version) -> Self {
let versions = match val {
http::Version::HTTP_11 => vec![b"http/1.1".to_vec()],
http::Version::HTTP_2 => vec![b"h2".to_vec(), b"http/1.1".to_vec()],
_ => {
unimplemented!("actix-http:client: supported versions http/1.1, http/2")
}
};
self.ssl = Connector::build_ssl(versions);
self
}
/// Indicates the initial window size (in octets) for
/// HTTP/2 stream-level flow control for received data.
///
/// The default value is 1 MiB, as set by `ConnectorConfig::default()`.
pub fn initial_window_size(mut self, size: u32) -> Self {
self.config.stream_window_size = size;
self
}
/// Indicates the initial window size (in octets) for
/// HTTP/2 connection-level flow control for received data.
///
/// The default value is 2 MiB, as set by `ConnectorConfig::default()`.
pub fn initial_connection_window_size(mut self, size: u32) -> Self {
self.config.conn_window_size = size;
self
}
@ -125,7 +198,7 @@ where
/// If limit is 0, the connector has no limit.
/// The default limit size is 100.
pub fn limit(mut self, limit: usize) -> Self {
self.limit = limit;
self.config.limit = limit;
self
}
@ -136,7 +209,7 @@ where
/// exceeds this period, the connection is closed.
/// Default keep-alive period is 15 seconds.
pub fn conn_keep_alive(mut self, dur: Duration) -> Self {
self.conn_keep_alive = dur;
self.config.conn_keep_alive = dur;
self
}
@ -146,7 +219,7 @@ where
/// until it is closed regardless of keep-alive period.
/// Default lifetime period is 75 seconds.
pub fn conn_lifetime(mut self, dur: Duration) -> Self {
self.conn_lifetime = dur;
self.config.conn_lifetime = dur;
self
}
@ -159,22 +232,26 @@ where
///
/// By default disconnect timeout is set to 3000 milliseconds.
pub fn disconnect_timeout(mut self, dur: Duration) -> Self {
self.disconnect_timeout = dur;
self.config.disconnect_timeout = Some(dur);
self
}
/// Finish configuration process and create connector service.
pub fn service(
/// The Connector builder always concludes by calling `finish()` last in
/// its combinator chain.
pub fn finish(
self,
) -> impl Service<Request = Uri, Response = impl Connection, Error = ConnectError> + Clone
{
#[cfg(not(feature = "ssl"))]
) -> impl Service<Request = Connect, Response = impl Connection, Error = ConnectError>
+ Clone {
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
{
let connector = TimeoutService::new(
self.timeout,
apply_fn(self.connector, |msg: Uri, srv| srv.call(msg.into()))
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
self.config.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
@ -184,40 +261,66 @@ where
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
connector,
self.conn_lifetime,
self.conn_keep_alive,
None,
self.limit,
self.config.no_disconnect_timeout(),
),
}
}
#[cfg(feature = "ssl")]
#[cfg(any(feature = "openssl", feature = "rustls"))]
{
const H2: &[u8] = b"h2";
use actix_connect::ssl::OpensslConnector;
#[cfg(feature = "openssl")]
use actix_connect::ssl::openssl::OpensslConnector;
#[cfg(feature = "rustls")]
use actix_connect::ssl::rustls::{RustlsConnector, Session};
use actix_service::{boxed::service, pipeline};
let ssl_service = TimeoutService::new(
self.timeout,
apply_fn(self.connector.clone(), |msg: Uri, srv| srv.call(msg.into()))
.map_err(ConnectError::from)
.and_then(
OpensslConnector::service(self.ssl)
.map_err(ConnectError::from)
self.config.timeout,
pipeline(
apply_fn(self.connector.clone(), |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from),
)
.and_then(match self.ssl {
#[cfg(feature = "openssl")]
SslConnector::Openssl(ssl) => service(
OpensslConnector::service(ssl)
.map(|stream| {
let sock = stream.into_parts().0;
let h2 = sock
.get_ref()
.ssl()
.selected_alpn_protocol()
.map(|protos| protos.windows(2).any(|w| w == H2))
.unwrap_or(false);
if h2 {
(sock, Protocol::Http2)
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
} else {
(sock, Protocol::Http1)
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
}
})
.map_err(ConnectError::from),
),
#[cfg(feature = "rustls")]
SslConnector::Rustls(ssl) => service(
RustlsConnector::service(ssl)
.map_err(ConnectError::from)
.map(|stream| {
let sock = stream.into_parts().0;
let h2 = sock
.get_ref()
.1
.get_alpn_protocol()
.map(|protos| protos.windows(2).any(|w| w == H2))
.unwrap_or(false);
if h2 {
(Box::new(sock) as Box<dyn Io>, Protocol::Http2)
} else {
(Box::new(sock) as Box<dyn Io>, Protocol::Http1)
}
}),
),
}),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
@ -225,10 +328,12 @@ where
});
let tcp_service = TimeoutService::new(
self.timeout,
apply_fn(self.connector.clone(), |msg: Uri, srv| srv.call(msg.into()))
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
self.config.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
.map_err(ConnectError::from)
.map(|stream| (stream.into_parts().0, Protocol::Http1)),
)
.map_err(|e| match e {
TimeoutError::Service(e) => e,
@ -238,44 +343,37 @@ where
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
tcp_service,
self.conn_lifetime,
self.conn_keep_alive,
None,
self.limit,
),
ssl_pool: ConnectionPool::new(
ssl_service,
self.conn_lifetime,
self.conn_keep_alive,
Some(self.disconnect_timeout),
self.limit,
self.config.no_disconnect_timeout(),
),
ssl_pool: ConnectionPool::new(ssl_service, self.config),
}
}
}
}
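Tying the builder methods above together, a hedged configuration sketch; the concrete values are arbitrary, and `actix_http::client::Connector` is assumed to be the public path for the type shown here:

```rust
use std::time::Duration;
use actix_http::client::Connector;

fn build_client_connector() {
    // 5s connect timeout (DNS + TCP + optional TLS), tuned HTTP/2 windows,
    // at most 25 pooled connections, each kept alive for up to 30s when idle.
    let _connector = Connector::new()
        .timeout(Duration::from_secs(5))
        .initial_window_size(1024 * 1024)                // per-stream window
        .initial_connection_window_size(2 * 1024 * 1024) // per-connection window
        .limit(25)
        .conn_keep_alive(Duration::from_secs(30))
        .finish();
    // `.max_http_version(http::Version::HTTP_11)` could be inserted before
    // `finish()` to skip the "h2" ALPN protocol entirely.
}
```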
#[cfg(not(feature = "ssl"))]
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
mod connect_impl {
use futures::future::{err, Either, FutureResult};
use futures::Poll;
use std::task::{Context, Poll};
use futures_util::future::{err, Either, Ready};
use super::*;
use crate::client::connection::IoConnection;
pub(crate) struct InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Uri, Response = (Io, Protocol), Error = ConnectError>,
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
pub(crate) tcp_pool: ConnectionPool<T, Io>,
}
impl<T, Io> Clone for InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Uri, Response = (Io, Protocol), Error = ConnectError>
+ Clone,
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
fn clone(&self) -> Self {
InnerConnector {
@ -286,48 +384,52 @@ mod connect_impl {
impl<T, Io> Service for InnerConnector<T, Io>
where
Io: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Uri, Response = (Io, Protocol), Error = ConnectError>,
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Uri;
type Request = Connect;
type Response = IoConnection<Io>;
type Error = ConnectError;
type Future = Either<
<ConnectionPool<T, Io> as Service>::Future,
FutureResult<IoConnection<Io>, ConnectError>,
Ready<Result<IoConnection<Io>, ConnectError>>,
>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.tcp_pool.poll_ready()
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.tcp_pool.poll_ready(cx)
}
fn call(&mut self, req: Uri) -> Self::Future {
match req.scheme_str() {
fn call(&mut self, req: Connect) -> Self::Future {
match req.uri.scheme_str() {
Some("https") | Some("wss") => {
Either::B(err(ConnectError::SslIsNotSupported))
Either::Right(err(ConnectError::SslIsNotSupported))
}
_ => Either::A(self.tcp_pool.call(req)),
_ => Either::Left(self.tcp_pool.call(req)),
}
}
}
}
#[cfg(feature = "ssl")]
#[cfg(any(feature = "openssl", feature = "rustls"))]
mod connect_impl {
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use futures::future::{Either, FutureResult};
use futures::{Async, Future, Poll};
use futures_core::ready;
use futures_util::future::Either;
use super::*;
use crate::client::connection::EitherConnection;
pub(crate) struct InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + 'static,
Io2: AsyncRead + AsyncWrite + 'static,
T1: Service<Request = Uri, Response = (Io1, Protocol), Error = ConnectError>,
T2: Service<Request = Uri, Response = (Io2, Protocol), Error = ConnectError>,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>,
{
pub(crate) tcp_pool: ConnectionPool<T1, Io1>,
pub(crate) ssl_pool: ConnectionPool<T2, Io2>,
@ -335,12 +437,12 @@ mod connect_impl {
impl<T1, T2, Io1, Io2> Clone for InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + 'static,
Io2: AsyncRead + AsyncWrite + 'static,
T1: Service<Request = Uri, Response = (Io1, Protocol), Error = ConnectError>
+ Clone,
T2: Service<Request = Uri, Response = (Io2, Protocol), Error = ConnectError>
+ Clone,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
fn clone(&self) -> Self {
InnerConnector {
@ -352,91 +454,94 @@ mod connect_impl {
impl<T1, T2, Io1, Io2> Service for InnerConnector<T1, T2, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + 'static,
Io2: AsyncRead + AsyncWrite + 'static,
T1: Service<Request = Uri, Response = (Io1, Protocol), Error = ConnectError>,
T2: Service<Request = Uri, Response = (Io2, Protocol), Error = ConnectError>,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T1: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
T2: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Uri;
type Request = Connect;
type Response = EitherConnection<Io1, Io2>;
type Error = ConnectError;
type Future = Either<
FutureResult<Self::Response, Self::Error>,
Either<
InnerConnectorResponseA<T1, Io1, Io2>,
InnerConnectorResponseB<T2, Io1, Io2>,
>,
InnerConnectorResponseA<T1, Io1, Io2>,
InnerConnectorResponseB<T2, Io1, Io2>,
>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.tcp_pool.poll_ready()
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.tcp_pool.poll_ready(cx)
}
fn call(&mut self, req: Uri) -> Self::Future {
match req.scheme_str() {
Some("https") | Some("wss") => {
Either::B(Either::B(InnerConnectorResponseB {
fut: self.ssl_pool.call(req),
_t: PhantomData,
}))
}
_ => Either::B(Either::A(InnerConnectorResponseA {
fn call(&mut self, req: Connect) -> Self::Future {
match req.uri.scheme_str() {
Some("https") | Some("wss") => Either::Right(InnerConnectorResponseB {
fut: self.ssl_pool.call(req),
_t: PhantomData,
}),
_ => Either::Left(InnerConnectorResponseA {
fut: self.tcp_pool.call(req),
_t: PhantomData,
})),
}),
}
}
}
#[pin_project::pin_project]
pub(crate) struct InnerConnectorResponseA<T, Io1, Io2>
where
Io1: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Uri, Response = (Io1, Protocol), Error = ConnectError>,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
{
#[pin]
fut: <ConnectionPool<T, Io1> as Service>::Future,
_t: PhantomData<Io2>,
}
impl<T, Io1, Io2> Future for InnerConnectorResponseA<T, Io1, Io2>
where
T: Service<Request = Uri, Response = (Io1, Protocol), Error = ConnectError>,
Io1: AsyncRead + AsyncWrite + 'static,
Io2: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Connect, Response = (Io1, Protocol), Error = ConnectError>
+ 'static,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Item = EitherConnection<Io1, Io2>;
type Error = ConnectError;
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll()? {
Async::NotReady => Ok(Async::NotReady),
Async::Ready(res) => Ok(Async::Ready(EitherConnection::A(res))),
}
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
.map(EitherConnection::A),
)
}
}
#[pin_project::pin_project]
pub(crate) struct InnerConnectorResponseB<T, Io1, Io2>
where
Io2: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Uri, Response = (Io2, Protocol), Error = ConnectError>,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
{
#[pin]
fut: <ConnectionPool<T, Io2> as Service>::Future,
_t: PhantomData<Io1>,
}
impl<T, Io1, Io2> Future for InnerConnectorResponseB<T, Io1, Io2>
where
T: Service<Request = Uri, Response = (Io2, Protocol), Error = ConnectError>,
Io1: AsyncRead + AsyncWrite + 'static,
Io2: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Connect, Response = (Io2, Protocol), Error = ConnectError>
+ 'static,
Io1: AsyncRead + AsyncWrite + Unpin + 'static,
Io2: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Item = EitherConnection<Io1, Io2>;
type Error = ConnectError;
type Output = Result<EitherConnection<Io1, Io2>, ConnectError>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll()? {
Async::NotReady => Ok(Async::NotReady),
Async::Ready(res) => Ok(Async::Ready(EitherConnection::B(res))),
}
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(
ready!(Pin::new(&mut self.get_mut().fut).poll(cx))
.map(EitherConnection::B),
)
}
}
}


@ -1,14 +1,13 @@
use std::io;
use actix_connect::resolver::ResolveError;
use derive_more::{Display, From};
use trust_dns_resolver::error::ResolveError;
#[cfg(feature = "ssl")]
use openssl::ssl::{Error as SslError, HandshakeError};
#[cfg(feature = "openssl")]
use actix_connect::ssl::openssl::{HandshakeError, SslError};
use crate::error::{Error, ParseError, ResponseError};
use crate::http::Error as HttpError;
use crate::response::Response;
use crate::http::{Error as HttpError, StatusCode};
/// A set of errors that can occur while connecting to an HTTP host
#[derive(Debug, Display, From)]
@ -18,10 +17,15 @@ pub enum ConnectError {
SslIsNotSupported,
/// SSL error
#[cfg(feature = "ssl")]
#[cfg(feature = "openssl")]
#[display(fmt = "{}", _0)]
SslError(SslError),
/// SSL Handshake error
#[cfg(feature = "openssl")]
#[display(fmt = "{}", _0)]
SslHandshakeError(String),
/// Failed to resolve the hostname
#[display(fmt = "Failed resolving hostname: {}", _0)]
Resolver(ResolveError),
@ -35,7 +39,7 @@ pub enum ConnectError {
H2(h2::Error),
/// Connecting took too long
#[display(fmt = "Timeout out while establishing connection")]
#[display(fmt = "Timeout while establishing connection")]
Timeout,
/// Connector has been disconnected
@ -44,33 +48,31 @@ pub enum ConnectError {
/// Unresolved host name
#[display(fmt = "Connector received `Connect` method with unresolved host")]
Unresolverd,
Unresolved,
/// Connection io error
#[display(fmt = "{}", _0)]
Io(io::Error),
}
impl std::error::Error for ConnectError {}
impl From<actix_connect::ConnectError> for ConnectError {
fn from(err: actix_connect::ConnectError) -> ConnectError {
match err {
actix_connect::ConnectError::Resolver(e) => ConnectError::Resolver(e),
actix_connect::ConnectError::NoRecords => ConnectError::NoRecords,
actix_connect::ConnectError::InvalidInput => panic!(),
actix_connect::ConnectError::Unresolverd => ConnectError::Unresolverd,
actix_connect::ConnectError::Unresolved => ConnectError::Unresolved,
actix_connect::ConnectError::Io(e) => ConnectError::Io(e),
}
}
}
#[cfg(feature = "ssl")]
impl<T> From<HandshakeError<T>> for ConnectError {
#[cfg(feature = "openssl")]
impl<T: std::fmt::Debug> From<HandshakeError<T>> for ConnectError {
fn from(err: HandshakeError<T>) -> ConnectError {
match err {
HandshakeError::SetupFailure(stack) => SslError::from(stack).into(),
HandshakeError::Failure(stream) => stream.into_error().into(),
HandshakeError::WouldBlock(stream) => stream.into_error().into(),
}
ConnectError::SslHandshakeError(format!("{:?}", err))
}
}
@ -86,6 +88,8 @@ pub enum InvalidUrl {
HttpError(http::Error),
}
impl std::error::Error for InvalidUrl {}
/// A set of errors that can occur during request sending and response reading
#[derive(Debug, Display, From)]
pub enum SendRequestError {
@ -105,6 +109,9 @@ pub enum SendRequestError {
/// Http2 error
#[display(fmt = "{}", _0)]
H2(h2::Error),
/// Response took too long
#[display(fmt = "Timeout while waiting for response")]
Timeout,
/// Tunnels are not supported for http2 connection
#[display(fmt = "Tunnels are not supported for http2 connection")]
TunnelNotSupported,
@ -112,16 +119,39 @@ pub enum SendRequestError {
Body(Error),
}
impl std::error::Error for SendRequestError {}
/// Convert `SendRequestError` to a server `Response`
impl ResponseError for SendRequestError {
fn error_response(&self) -> Response {
fn status_code(&self) -> StatusCode {
match *self {
SendRequestError::Connect(ConnectError::Timeout) => {
Response::GatewayTimeout()
StatusCode::GATEWAY_TIMEOUT
}
SendRequestError::Connect(_) => Response::BadGateway(),
_ => Response::InternalServerError(),
SendRequestError::Connect(_) => StatusCode::BAD_REQUEST,
_ => StatusCode::INTERNAL_SERVER_ERROR,
}
}
}
/// A set of errors that can occur while freezing a request
#[derive(Debug, Display, From)]
pub enum FreezeRequestError {
/// Invalid URL
#[display(fmt = "Invalid URL: {}", _0)]
Url(InvalidUrl),
/// Http error
#[display(fmt = "{}", _0)]
Http(HttpError),
}
impl std::error::Error for FreezeRequestError {}
impl From<FreezeRequestError> for SendRequestError {
fn from(e: FreezeRequestError) -> Self {
match e {
FreezeRequestError::Url(e) => e.into(),
FreezeRequestError::Http(e) => e.into(),
}
}
}
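A short sketch of how these client errors surface through the `ResponseError` impl above when a server proxies an upstream request; the helper function and its use are illustrative assumptions:

```rust
use actix_http::client::{ConnectError, SendRequestError};
use actix_http::error::ResponseError;
use actix_http::http::StatusCode;

// Map an upstream client failure to the status an intermediary would return:
// connect timeouts become 504 Gateway Timeout, other connect failures 400,
// and everything else 500, mirroring the impl above.
fn upstream_status(err: &SendRequestError) -> StatusCode {
    err.status_code()
}

fn demo() {
    let err = SendRequestError::Connect(ConnectError::Timeout);
    assert_eq!(upstream_status(&err), StatusCode::GATEWAY_TIMEOUT);
}
```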


@ -1,13 +1,20 @@
use std::{io, time};
use std::io::Write;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{io, mem, time};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use bytes::Bytes;
use futures::future::{ok, Either};
use futures::{Async, Future, Poll, Sink, Stream};
use bytes::buf::BufMutExt;
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use futures_util::future::poll_fn;
use futures_util::{pin_mut, SinkExt, StreamExt};
use crate::error::PayloadError;
use crate::h1;
use crate::message::{RequestHead, ResponseHead};
use crate::header::HeaderMap;
use crate::http::header::{IntoHeaderValue, HOST};
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::{Payload, PayloadStream};
use super::connection::{ConnectionLifetime, ConnectionType, IoConnection};
@ -15,98 +22,162 @@ use super::error::{ConnectError, SendRequestError};
use super::pool::Acquired;
use crate::body::{BodySize, MessageBody};
pub(crate) fn send_request<T, B>(
pub(crate) async fn send_request<T, B>(
io: T,
head: RequestHead,
mut head: RequestHeadType,
body: B,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> impl Future<Item = (ResponseHead, Payload), Error = SendRequestError>
) -> Result<(ResponseHead, Payload), SendRequestError>
where
T: AsyncRead + AsyncWrite + 'static,
T: AsyncRead + AsyncWrite + Unpin + 'static,
B: MessageBody,
{
// set request host header
if !head.as_ref().headers.contains_key(HOST)
&& !head.extra_headers().iter().any(|h| h.contains_key(HOST))
{
if let Some(host) = head.as_ref().uri.host() {
let mut wrt = BytesMut::with_capacity(host.len() + 5).writer();
let _ = match head.as_ref().uri.port_u16() {
None | Some(80) | Some(443) => write!(wrt, "{}", host),
Some(port) => write!(wrt, "{}:{}", host, port),
};
match wrt.get_mut().split().freeze().try_into() {
Ok(value) => match head {
RequestHeadType::Owned(ref mut head) => {
head.headers.insert(HOST, value)
}
RequestHeadType::Rc(_, ref mut extra_headers) => {
let headers = extra_headers.get_or_insert(HeaderMap::new());
headers.insert(HOST, value)
}
},
Err(e) => log::error!("Can not set HOST header {}", e),
}
}
}
let io = H1Connection {
created,
pool,
io: Some(io),
};
let len = body.length();
// create Framed and send request
let mut framed_inner = Framed::new(io, h1::ClientCodec::default());
framed_inner.send((head, body.size()).into()).await?;
// create Framed and send request
Framed::new(io, h1::ClientCodec::default())
.send((head, len).into())
.from_err()
// send request body
.and_then(move |framed| match body.length() {
BodySize::None | BodySize::Empty | BodySize::Sized(0) => {
Either::A(ok(framed))
}
_ => Either::B(SendBody::new(body, framed)),
})
// read response and init read body
.and_then(|framed| {
framed
.into_future()
.map_err(|(e, _)| SendRequestError::from(e))
.and_then(|(item, framed)| {
if let Some(res) = item {
match framed.get_codec().message_type() {
h1::MessageType::None => {
let force_close = !framed.get_codec().keepalive();
release_connection(framed, force_close);
Ok((res, Payload::None))
}
_ => {
let pl: PayloadStream = Box::new(PlStream::new(framed));
Ok((res, pl.into()))
}
}
} else {
Err(ConnectError::Disconnected.into())
}
})
})
// send request body
match body.size() {
BodySize::None | BodySize::Empty | BodySize::Sized(0) => (),
_ => send_body(body, Pin::new(&mut framed_inner)).await?,
};
// read response and init read body
let res = Pin::new(&mut framed_inner).into_future().await;
let (head, framed) = if let (Some(result), framed) = res {
let item = result.map_err(SendRequestError::from)?;
(item, framed)
} else {
return Err(SendRequestError::from(ConnectError::Disconnected));
};
match framed.codec_ref().message_type() {
h1::MessageType::None => {
let force_close = !framed.codec_ref().keepalive();
release_connection(framed, force_close);
Ok((head, Payload::None))
}
_ => {
let pl: PayloadStream = PlStream::new(framed_inner).boxed_local();
Ok((head, pl.into()))
}
}
}
pub(crate) fn open_tunnel<T>(
pub(crate) async fn open_tunnel<T>(
io: T,
head: RequestHead,
) -> impl Future<Item = (ResponseHead, Framed<T, h1::ClientCodec>), Error = SendRequestError>
head: RequestHeadType,
) -> Result<(ResponseHead, Framed<T, h1::ClientCodec>), SendRequestError>
where
T: AsyncRead + AsyncWrite + 'static,
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
// create Framed and send request
Framed::new(io, h1::ClientCodec::default())
.send((head, BodySize::None).into())
.from_err()
// read response
.and_then(|framed| {
framed
.into_future()
.map_err(|(e, _)| SendRequestError::from(e))
.and_then(|(head, framed)| {
if let Some(head) = head {
Ok((head, framed))
// create Framed and send request
let mut framed = Framed::new(io, h1::ClientCodec::default());
framed.send((head, BodySize::None).into()).await?;
// read response
if let (Some(result), framed) = framed.into_future().await {
let head = result.map_err(SendRequestError::from)?;
Ok((head, framed))
} else {
Err(SendRequestError::from(ConnectError::Disconnected))
}
}
/// send request body to the peer
pub(crate) async fn send_body<T, B>(
body: B,
mut framed: Pin<&mut Framed<T, h1::ClientCodec>>,
) -> Result<(), SendRequestError>
where
T: ConnectionLifetime + Unpin,
B: MessageBody,
{
pin_mut!(body);
let mut eof = false;
while !eof {
while !eof && !framed.as_ref().is_write_buf_full() {
match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
Some(result) => {
framed.as_mut().write(h1::Message::Chunk(Some(result?)))?;
}
None => {
eof = true;
framed.as_mut().write(h1::Message::Chunk(None))?;
}
}
}
if !framed.as_ref().is_write_buf_empty() {
poll_fn(|cx| match framed.as_mut().flush(cx) {
Poll::Ready(Ok(_)) => Poll::Ready(Ok(())),
Poll::Ready(Err(err)) => Poll::Ready(Err(err)),
Poll::Pending => {
if !framed.as_ref().is_write_buf_full() {
Poll::Ready(Ok(()))
} else {
Err(SendRequestError::from(ConnectError::Disconnected))
Poll::Pending
}
})
})
}
})
.await?;
}
}
SinkExt::flush(Pin::into_inner(framed)).await?;
Ok(())
}
#[doc(hidden)]
/// HTTP client connection
pub struct H1Connection<T> {
/// T should be `Unpin`
io: Option<T>,
created: time::Instant,
pool: Option<Acquired<T>>,
}
impl<T: AsyncRead + AsyncWrite + 'static> ConnectionLifetime for H1Connection<T> {
impl<T> ConnectionLifetime for H1Connection<T>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
/// Close connection
fn close(&mut self) {
fn close(mut self: Pin<&mut Self>) {
if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() {
pool.close(IoConnection::new(
@ -119,7 +190,7 @@ impl<T: AsyncRead + AsyncWrite + 'static> ConnectionLifetime for H1Connection<T>
}
/// Release this connection to the connection pool
fn release(&mut self) {
fn release(mut self: Pin<&mut Self>) {
if let Some(mut pool) = self.pool.take() {
if let Some(io) = self.io.take() {
pool.release(IoConnection::new(
@ -132,143 +203,96 @@ impl<T: AsyncRead + AsyncWrite + 'static> ConnectionLifetime for H1Connection<T>
}
}
impl<T: AsyncRead + AsyncWrite + 'static> io::Read for H1Connection<T> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.io.as_mut().unwrap().read(buf)
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncRead for H1Connection<T> {
unsafe fn prepare_uninitialized_buffer(
&self,
buf: &mut [mem::MaybeUninit<u8>],
) -> bool {
self.io.as_ref().unwrap().prepare_uninitialized_buffer(buf)
}
fn poll_read(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &mut [u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io.as_mut().unwrap()).poll_read(cx, buf)
}
}
impl<T: AsyncRead + AsyncWrite + 'static> AsyncRead for H1Connection<T> {}
impl<T: AsyncRead + AsyncWrite + 'static> io::Write for H1Connection<T> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.io.as_mut().unwrap().write(buf)
impl<T: AsyncRead + AsyncWrite + Unpin + 'static> AsyncWrite for H1Connection<T> {
fn poll_write(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
buf: &[u8],
) -> Poll<io::Result<usize>> {
Pin::new(&mut self.io.as_mut().unwrap()).poll_write(cx, buf)
}
fn flush(&mut self) -> io::Result<()> {
self.io.as_mut().unwrap().flush()
}
}
impl<T: AsyncRead + AsyncWrite + 'static> AsyncWrite for H1Connection<T> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
self.io.as_mut().unwrap().shutdown()
}
}
/// Future responsible for sending request body to the peer
pub(crate) struct SendBody<I, B> {
body: Option<B>,
framed: Option<Framed<I, h1::ClientCodec>>,
flushed: bool,
}
impl<I, B> SendBody<I, B>
where
I: AsyncRead + AsyncWrite + 'static,
B: MessageBody,
{
pub(crate) fn new(body: B, framed: Framed<I, h1::ClientCodec>) -> Self {
SendBody {
body: Some(body),
framed: Some(framed),
flushed: true,
}
}
}
impl<I, B> Future for SendBody<I, B>
where
I: ConnectionLifetime,
B: MessageBody,
{
type Item = Framed<I, h1::ClientCodec>;
type Error = SendRequestError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let mut body_ready = true;
loop {
while body_ready
&& self.body.is_some()
&& !self.framed.as_ref().unwrap().is_write_buf_full()
{
match self.body.as_mut().unwrap().poll_next()? {
Async::Ready(item) => {
// check if body is done
if item.is_none() {
let _ = self.body.take();
}
self.flushed = false;
self.framed
.as_mut()
.unwrap()
.force_send(h1::Message::Chunk(item))?;
break;
}
Async::NotReady => body_ready = false,
}
}
if !self.flushed {
match self.framed.as_mut().unwrap().poll_complete()? {
Async::Ready(_) => {
self.flushed = true;
continue;
}
Async::NotReady => return Ok(Async::NotReady),
}
}
if self.body.is_none() {
return Ok(Async::Ready(self.framed.take().unwrap()));
}
return Ok(Async::NotReady);
}
fn poll_flush(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<io::Result<()>> {
Pin::new(self.io.as_mut().unwrap()).poll_flush(cx)
}
fn poll_shutdown(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), io::Error>> {
Pin::new(self.io.as_mut().unwrap()).poll_shutdown(cx)
}
}
#[pin_project::pin_project]
pub(crate) struct PlStream<Io> {
#[pin]
framed: Option<Framed<Io, h1::ClientPayloadCodec>>,
}
impl<Io: ConnectionLifetime> PlStream<Io> {
fn new(framed: Framed<Io, h1::ClientCodec>) -> Self {
let framed = framed.into_map_codec(|codec| codec.into_payload_codec());
PlStream {
framed: Some(framed.map_codec(|codec| codec.into_payload_codec())),
framed: Some(framed),
}
}
}
impl<Io: ConnectionLifetime> Stream for PlStream<Io> {
type Item = Bytes;
type Error = PayloadError;
type Item = Result<Bytes, PayloadError>;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
match self.framed.as_mut().unwrap().poll()? {
Async::NotReady => Ok(Async::NotReady),
Async::Ready(Some(chunk)) => {
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
let mut this = self.project();
match this.framed.as_mut().as_pin_mut().unwrap().next_item(cx)? {
Poll::Pending => Poll::Pending,
Poll::Ready(Some(chunk)) => {
if let Some(chunk) = chunk {
Ok(Async::Ready(Some(chunk)))
Poll::Ready(Some(Ok(chunk)))
} else {
let framed = self.framed.take().unwrap();
let force_close = framed.get_codec().keepalive();
let framed = this.framed.as_mut().as_pin_mut().unwrap();
let force_close = !framed.codec_ref().keepalive();
release_connection(framed, force_close);
Ok(Async::Ready(None))
Poll::Ready(None)
}
}
Async::Ready(None) => Ok(Async::Ready(None)),
Poll::Ready(None) => Poll::Ready(None),
}
}
}
fn release_connection<T, U>(framed: Framed<T, U>, force_close: bool)
fn release_connection<T, U>(framed: Pin<&mut Framed<T, U>>, force_close: bool)
where
T: ConnectionLifetime,
{
let mut parts = framed.into_parts();
if !force_close && parts.read_buf.is_empty() && parts.write_buf.is_empty() {
parts.io.release()
if !force_close && framed.is_read_buf_empty() && framed.is_write_buf_empty() {
framed.io_pin().release()
} else {
parts.io.close()
framed.io_pin().close()
}
}
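For orientation, a hedged sketch of the consumer side of this payload path (the helper name and the buffering are illustrative, not part of the diff): once the body reports EOF, the Stream impl above has already released the connection back to the pool, or closed it when keep-alive is off.
use futures_util::StreamExt;
// Illustrative only: drain an H1 response body; `Io: ConnectionLifetime + Unpin`
// is assumed so that `StreamExt::next` can be used directly.
async fn read_body<Io: ConnectionLifetime + Unpin>(
    mut payload: PlStream<Io>,
) -> Result<Bytes, PayloadError> {
    let mut buf = bytes::BytesMut::new();
    while let Some(chunk) = payload.next().await {
        // each item is a Result<Bytes, PayloadError>
        buf.extend_from_slice(&chunk?);
    }
    Ok(buf.freeze())
}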

View File

@ -1,175 +1,178 @@
use std::convert::TryFrom;
use std::future::Future;
use std::time;
use actix_codec::{AsyncRead, AsyncWrite};
use bytes::Bytes;
use futures::future::{err, Either};
use futures::{Async, Future, Poll};
use h2::{client::SendRequest, SendStream};
use futures_util::future::poll_fn;
use futures_util::pin_mut;
use h2::{
client::{Builder, Connection, SendRequest},
SendStream,
};
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, TRANSFER_ENCODING};
use http::{request::Request, HttpTryFrom, Method, Version};
use http::{request::Request, Method, Version};
use crate::body::{BodySize, MessageBody};
use crate::message::{RequestHead, ResponseHead};
use crate::header::HeaderMap;
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::Payload;
use super::config::ConnectorConfig;
use super::connection::{ConnectionType, IoConnection};
use super::error::SendRequestError;
use super::pool::Acquired;
pub(crate) fn send_request<T, B>(
io: SendRequest<Bytes>,
head: RequestHead,
pub(crate) async fn send_request<T, B>(
mut io: SendRequest<Bytes>,
head: RequestHeadType,
body: B,
created: time::Instant,
pool: Option<Acquired<T>>,
) -> impl Future<Item = (ResponseHead, Payload), Error = SendRequestError>
) -> Result<(ResponseHead, Payload), SendRequestError>
where
T: AsyncRead + AsyncWrite + 'static,
T: AsyncRead + AsyncWrite + Unpin + 'static,
B: MessageBody,
{
trace!("Sending client request: {:?} {:?}", head, body.length());
let head_req = head.method == Method::HEAD;
let length = body.length();
let eof = match length {
BodySize::None | BodySize::Empty | BodySize::Sized(0) => true,
_ => false,
trace!("Sending client request: {:?} {:?}", head, body.size());
let head_req = head.as_ref().method == Method::HEAD;
let length = body.size();
let eof = matches!(
length,
BodySize::None | BodySize::Empty | BodySize::Sized(0)
);
let mut req = Request::new(());
*req.uri_mut() = head.as_ref().uri.clone();
*req.method_mut() = head.as_ref().method.clone();
*req.version_mut() = Version::HTTP_2;
let mut skip_len = true;
// let mut has_date = false;
// Content length
let _ = match length {
BodySize::None => None,
BodySize::Stream => {
skip_len = false;
None
}
BodySize::Empty => req
.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0")),
BodySize::Sized(len) => req.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
};
io.ready()
.map_err(SendRequestError::from)
.and_then(move |mut io| {
let mut req = Request::new(());
*req.uri_mut() = head.uri;
*req.method_mut() = head.method;
*req.version_mut() = Version::HTTP_2;
// Extracting extra headers from RequestHeadType. HeaderMap::new() does not allocate.
let (head, extra_headers) = match head {
RequestHeadType::Owned(head) => (RequestHeadType::Owned(head), HeaderMap::new()),
RequestHeadType::Rc(head, extra_headers) => (
RequestHeadType::Rc(head, None),
extra_headers.unwrap_or_else(HeaderMap::new),
),
};
let mut skip_len = true;
// let mut has_date = false;
// merging headers from head and extra headers.
let headers = head
.as_ref()
.headers
.iter()
.filter(|(name, _)| !extra_headers.contains_key(*name))
.chain(extra_headers.iter());
// Content length
let _ = match length {
BodySize::None => None,
BodySize::Stream => {
skip_len = false;
None
}
BodySize::Empty => req
.headers_mut()
.insert(CONTENT_LENGTH, HeaderValue::from_static("0")),
BodySize::Sized(len) => req.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
BodySize::Sized64(len) => req.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
};
// copy headers
for (key, value) in headers {
match *key {
CONNECTION | TRANSFER_ENCODING => continue, // http2 specific
CONTENT_LENGTH if skip_len => continue,
// DATE => has_date = true,
_ => (),
}
req.headers_mut().append(key, value.clone());
}
// copy headers
for (key, value) in head.headers.iter() {
match *key {
CONNECTION | TRANSFER_ENCODING => continue, // http2 specific
CONTENT_LENGTH if skip_len => continue,
// DATE => has_date = true,
_ => (),
}
req.headers_mut().append(key, value.clone());
let res = poll_fn(|cx| io.poll_ready(cx)).await;
if let Err(e) = res {
release(io, pool, created, e.is_io());
return Err(SendRequestError::from(e));
}
let resp = match io.send_request(req, eof) {
Ok((fut, send)) => {
release(io, pool, created, false);
if !eof {
send_body(body, send).await?;
}
fut.await.map_err(SendRequestError::from)?
}
Err(e) => {
release(io, pool, created, e.is_io());
return Err(e.into());
}
};
match io.send_request(req, eof) {
Ok((res, send)) => {
release(io, pool, created, false);
let (parts, body) = resp.into_parts();
let payload = if head_req { Payload::None } else { body.into() };
if !eof {
Either::A(Either::B(
SendBody {
body,
send,
buf: None,
}
.and_then(move |_| res.map_err(SendRequestError::from)),
))
} else {
Either::B(res.map_err(SendRequestError::from))
}
}
Err(e) => {
release(io, pool, created, e.is_io());
Either::A(Either::A(err(e.into())))
}
}
})
.and_then(move |resp| {
let (parts, body) = resp.into_parts();
let payload = if head_req { Payload::None } else { body.into() };
let mut head = ResponseHead::default();
head.version = parts.version;
head.status = parts.status;
head.headers = parts.headers;
Ok((head, payload))
})
.from_err()
let mut head = ResponseHead::new(parts.status);
head.version = parts.version;
head.headers = parts.headers.into();
Ok((head, payload))
}
struct SendBody<B: MessageBody> {
async fn send_body<B: MessageBody>(
body: B,
send: SendStream<Bytes>,
buf: Option<Bytes>,
}
impl<B: MessageBody> Future for SendBody<B> {
type Item = ();
type Error = SendRequestError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
loop {
if self.buf.is_none() {
match self.body.poll_next() {
Ok(Async::Ready(Some(buf))) => {
self.send.reserve_capacity(buf.len());
self.buf = Some(buf);
}
Ok(Async::Ready(None)) => {
if let Err(e) = self.send.send_data(Bytes::new(), true) {
return Err(e.into());
}
self.send.reserve_capacity(0);
return Ok(Async::Ready(()));
}
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(e) => return Err(e.into()),
mut send: SendStream<Bytes>,
) -> Result<(), SendRequestError> {
let mut buf = None;
pin_mut!(body);
loop {
if buf.is_none() {
match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
Some(Ok(b)) => {
send.reserve_capacity(b.len());
buf = Some(b);
}
}
match self.send.poll_capacity() {
Ok(Async::NotReady) => return Ok(Async::NotReady),
Ok(Async::Ready(None)) => return Ok(Async::Ready(())),
Ok(Async::Ready(Some(cap))) => {
let mut buf = self.buf.take().unwrap();
let len = buf.len();
let bytes = buf.split_to(std::cmp::min(cap, len));
if let Err(e) = self.send.send_data(bytes, false) {
Some(Err(e)) => return Err(e.into()),
None => {
if let Err(e) = send.send_data(Bytes::new(), true) {
return Err(e.into());
} else {
if !buf.is_empty() {
self.send.reserve_capacity(buf.len());
self.buf = Some(buf);
}
continue;
}
send.reserve_capacity(0);
return Ok(());
}
Err(e) => return Err(e.into()),
}
}
match poll_fn(|cx| send.poll_capacity(cx)).await {
None => return Ok(()),
Some(Ok(cap)) => {
let b = buf.as_mut().unwrap();
let len = b.len();
let bytes = b.split_to(std::cmp::min(cap, len));
if let Err(e) = send.send_data(bytes, false) {
return Err(e.into());
} else {
if !b.is_empty() {
send.reserve_capacity(b.len());
} else {
buf = None;
}
continue;
}
}
Some(Err(e)) => return Err(e.into()),
}
}
}
// release SendRequest object
fn release<T: AsyncRead + AsyncWrite + 'static>(
fn release<T: AsyncRead + AsyncWrite + Unpin + 'static>(
io: SendRequest<Bytes>,
pool: Option<Acquired<T>>,
created: time::Instant,
@ -183,3 +186,18 @@ fn release<T: AsyncRead + AsyncWrite + 'static>(
}
}
}
pub(crate) fn handshake<Io>(
io: Io,
config: &ConnectorConfig,
) -> impl Future<Output = Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
let mut builder = Builder::new();
builder
.initial_window_size(config.stream_window_size)
.initial_connection_window_size(config.conn_window_size)
.enable_push(false);
builder.handshake(io)
}
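A minimal sketch of how this helper is intended to be driven (the `connect_h2` name is made up for illustration): the `Connection` half must be spawned onto the runtime, otherwise the returned `SendRequest` handle never makes progress.
// Sketch, assuming `io` is an already-established connection and `config`
// comes from the pool; mirrors what the pool's open-connection path does.
async fn connect_h2<Io>(
    io: Io,
    config: &ConnectorConfig,
) -> Result<SendRequest<Bytes>, h2::Error>
where
    Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
    let (send_request, connection) = handshake(io, config).await?;
    // drive the HTTP/2 connection in the background until it shuts down
    actix_rt::spawn(async move {
        let _ = connection.await;
    });
    Ok(send_request)
}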

View File

@ -1,4 +1,7 @@
//! Http client api
use http::Uri;
mod config;
mod connection;
mod connector;
mod error;
@ -8,4 +11,11 @@ mod pool;
pub use self::connection::Connection;
pub use self::connector::Connector;
pub use self::error::{ConnectError, InvalidUrl, SendRequestError};
pub use self::error::{ConnectError, FreezeRequestError, InvalidUrl, SendRequestError};
pub use self::pool::Protocol;
#[derive(Clone)]
pub struct Connect {
pub uri: Uri,
pub addr: Option<std::net::SocketAddr>,
}
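For illustration, a value like this might be built as follows (hypothetical helper; `addr` lets a caller pass a pre-resolved socket address, otherwise the connector resolves the URI itself):
use std::net::SocketAddr;
// Hypothetical: connect request for an HTTPS origin.
fn example_connect(addr: Option<SocketAddr>) -> Connect {
    Connect {
        uri: Uri::from_static("https://example.com"),
        addr,
    }
}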

View File

@ -1,27 +1,32 @@
use std::cell::RefCell;
use std::collections::VecDeque;
use std::io;
use std::future::Future;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use std::time::{Duration, Instant};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::{delay_for, Delay};
use actix_service::Service;
use actix_utils::{oneshot, task::LocalWaker};
use bytes::Bytes;
use futures::future::{err, ok, Either, FutureResult};
use futures::task::AtomicTask;
use futures::unsync::oneshot;
use futures::{Async, Future, Poll};
use h2::client::{handshake, Handshake};
use hashbrown::HashMap;
use http::uri::{Authority, Uri};
use futures_util::future::{poll_fn, FutureExt, LocalBoxFuture};
use fxhash::FxHashMap;
use h2::client::{Connection, SendRequest};
use http::uri::Authority;
use indexmap::IndexSet;
use pin_project::pin_project;
use slab::Slab;
use tokio_timer::{sleep, Delay};
use super::config::ConnectorConfig;
use super::connection::{ConnectionType, IoConnection};
use super::error::ConnectError;
use super::h2proto::handshake;
use super::Connect;
#[derive(Clone, Copy, PartialEq)]
/// Protocol version
pub enum Protocol {
Http1,
Http2,
@ -39,234 +44,202 @@ impl From<Authority> for Key {
}
/// Connections pool
pub(crate) struct ConnectionPool<T, Io: AsyncRead + AsyncWrite + 'static>(
T,
Rc<RefCell<Inner<Io>>>,
);
pub(crate) struct ConnectionPool<T, Io: 'static>(Rc<RefCell<T>>, Rc<RefCell<Inner<Io>>>);
impl<T, Io> ConnectionPool<T, Io>
where
Io: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Uri, Response = (Io, Protocol), Error = ConnectError>,
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
pub(crate) fn new(
connector: T,
conn_lifetime: Duration,
conn_keep_alive: Duration,
disconnect_timeout: Option<Duration>,
limit: usize,
) -> Self {
ConnectionPool(
connector,
Rc::new(RefCell::new(Inner {
conn_lifetime,
conn_keep_alive,
disconnect_timeout,
limit,
acquired: 0,
waiters: Slab::new(),
waiters_queue: IndexSet::new(),
available: HashMap::new(),
task: AtomicTask::new(),
})),
)
pub(crate) fn new(connector: T, config: ConnectorConfig) -> Self {
let connector_rc = Rc::new(RefCell::new(connector));
let inner_rc = Rc::new(RefCell::new(Inner {
config,
acquired: 0,
waiters: Slab::new(),
waiters_queue: IndexSet::new(),
available: FxHashMap::default(),
waker: LocalWaker::new(),
}));
// start support future
actix_rt::spawn(ConnectorPoolSupport {
connector: Rc::clone(&connector_rc),
inner: Rc::clone(&inner_rc),
});
ConnectionPool(connector_rc, inner_rc)
}
}
impl<T, Io> Clone for ConnectionPool<T, Io>
where
T: Clone,
Io: AsyncRead + AsyncWrite + 'static,
Io: 'static,
{
fn clone(&self) -> Self {
ConnectionPool(self.0.clone(), self.1.clone())
}
}
impl<T, Io> Service for ConnectionPool<T, Io>
where
Io: AsyncRead + AsyncWrite + 'static,
T: Service<Request = Uri, Response = (Io, Protocol), Error = ConnectError>,
{
type Request = Uri;
type Response = IoConnection<Io>;
type Error = ConnectError;
type Future = Either<
FutureResult<Self::Response, Self::Error>,
Either<WaitForConnection<Io>, OpenConnection<T::Future, Io>>,
>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.0.poll_ready()
}
fn call(&mut self, req: Uri) -> Self::Future {
let key = if let Some(authority) = req.authority_part() {
authority.clone().into()
} else {
return Either::A(err(ConnectError::Unresolverd));
};
// acquire connection
match self.1.as_ref().borrow_mut().acquire(&key) {
Acquire::Acquired(io, created) => {
// use existing connection
Either::A(ok(IoConnection::new(
io,
created,
Some(Acquired(key, Some(self.1.clone()))),
)))
}
Acquire::NotAvailable => {
// connection is not available, wait
let (rx, token) = self.1.as_ref().borrow_mut().wait_for(req);
Either::B(Either::A(WaitForConnection {
rx,
key,
token,
inner: Some(self.1.clone()),
}))
}
Acquire::Available => {
// open new connection
Either::B(Either::B(OpenConnection::new(
key,
self.1.clone(),
self.0.call(req),
)))
}
}
impl<T, Io> Drop for ConnectionPool<T, Io> {
fn drop(&mut self) {
// wake up the ConnectorPoolSupport when dropping so it can exit properly.
self.1.borrow().waker.wake();
}
}
#[doc(hidden)]
pub struct WaitForConnection<Io>
impl<T, Io> Service for ConnectionPool<T, Io>
where
Io: AsyncRead + AsyncWrite + 'static,
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
type Request = Connect;
type Response = IoConnection<Io>;
type Error = ConnectError;
type Future = LocalBoxFuture<'static, Result<IoConnection<Io>, ConnectError>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.poll_ready(cx)
}
fn call(&mut self, req: Connect) -> Self::Future {
let mut connector = self.0.clone();
let inner = self.1.clone();
let fut = async move {
let key = if let Some(authority) = req.uri.authority() {
authority.clone().into()
} else {
return Err(ConnectError::Unresolved);
};
// acquire connection
match poll_fn(|cx| Poll::Ready(inner.borrow_mut().acquire(&key, cx))).await {
Acquire::Acquired(io, created) => {
// use existing connection
Ok(IoConnection::new(
io,
created,
Some(Acquired(key, Some(inner))),
))
}
Acquire::Available => {
// open tcp connection
let (io, proto) = connector.call(req).await?;
let config = inner.borrow().config.clone();
let guard = OpenGuard::new(key, inner);
if proto == Protocol::Http1 {
Ok(IoConnection::new(
ConnectionType::H1(io),
Instant::now(),
Some(guard.consume()),
))
} else {
let (snd, connection) = handshake(io, &config).await?;
actix_rt::spawn(connection.map(|_| ()));
Ok(IoConnection::new(
ConnectionType::H2(snd),
Instant::now(),
Some(guard.consume()),
))
}
}
_ => {
// connection is not available, wait
let (rx, token) = inner.borrow_mut().wait_for(req);
let guard = WaiterGuard::new(key, token, inner);
let res = match rx.await {
Err(_) => Err(ConnectError::Disconnected),
Ok(res) => res,
};
guard.consume();
res
}
}
};
fut.boxed_local()
}
}
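From the caller's point of view the pool is just another `Service`; a hedged sketch of acquiring a connection (the `acquire` helper is illustrative and assumes the pool was built with `ConnectionPool::new` above):
// Illustrative only: request a pooled connection for a URI.
async fn acquire<T, Io>(
    pool: &mut ConnectionPool<T, Io>,
    uri: http::Uri,
) -> Result<IoConnection<Io>, ConnectError>
where
    Io: AsyncRead + AsyncWrite + Unpin + 'static,
    T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
        + 'static,
{
    pool.call(Connect { uri, addr: None }).await
}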
struct WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
key: Key,
token: usize,
rx: oneshot::Receiver<Result<IoConnection<Io>, ConnectError>>,
inner: Option<Rc<RefCell<Inner<Io>>>>,
}
impl<Io> Drop for WaitForConnection<Io>
impl<Io> WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + 'static,
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn new(key: Key, token: usize, inner: Rc<RefCell<Inner<Io>>>) -> Self {
Self {
key,
token,
inner: Some(inner),
}
}
fn consume(mut self) {
let _ = self.inner.take();
}
}
impl<Io> Drop for WaiterGuard<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(&mut self) {
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release_waiter(&self.key, self.token);
inner.check_availibility();
inner.check_availability();
}
}
}
impl<Io> Future for WaitForConnection<Io>
struct OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite,
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
type Item = IoConnection<Io>;
type Error = ConnectError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.rx.poll() {
Ok(Async::Ready(item)) => match item {
Err(err) => Err(err),
Ok(conn) => {
let _ = self.inner.take();
Ok(Async::Ready(conn))
}
},
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(_) => {
let _ = self.inner.take();
Err(ConnectError::Disconnected)
}
}
}
}
#[doc(hidden)]
pub struct OpenConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + 'static,
{
fut: F,
key: Key,
h2: Option<Handshake<Io, Bytes>>,
inner: Option<Rc<RefCell<Inner<Io>>>>,
}
impl<F, Io> OpenConnection<F, Io>
impl<Io> OpenGuard<Io>
where
F: Future<Item = (Io, Protocol), Error = ConnectError>,
Io: AsyncRead + AsyncWrite + 'static,
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn new(key: Key, inner: Rc<RefCell<Inner<Io>>>, fut: F) -> Self {
OpenConnection {
fn new(key: Key, inner: Rc<RefCell<Inner<Io>>>) -> Self {
Self {
key,
fut,
inner: Some(inner),
h2: None,
}
}
fn consume(mut self) -> Acquired<Io> {
Acquired(self.key.clone(), self.inner.take())
}
}
impl<F, Io> Drop for OpenConnection<F, Io>
impl<Io> Drop for OpenGuard<Io>
where
Io: AsyncRead + AsyncWrite + 'static,
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
let mut inner = inner.as_ref().borrow_mut();
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release();
inner.check_availibility();
}
}
}
impl<F, Io> Future for OpenConnection<F, Io>
where
F: Future<Item = (Io, Protocol), Error = ConnectError>,
Io: AsyncRead + AsyncWrite,
{
type Item = IoConnection<Io>;
type Error = ConnectError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(ref mut h2) = self.h2 {
return match h2.poll() {
Ok(Async::Ready((snd, connection))) => {
tokio_current_thread::spawn(connection.map_err(|_| ()));
Ok(Async::Ready(IoConnection::new(
ConnectionType::H2(snd),
Instant::now(),
Some(Acquired(self.key.clone(), self.inner.clone())),
)))
}
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(e) => Err(e.into()),
};
}
match self.fut.poll() {
Err(err) => Err(err),
Ok(Async::Ready((io, proto))) => {
let _ = self.inner.take();
if proto == Protocol::Http1 {
Ok(Async::Ready(IoConnection::new(
ConnectionType::H1(io),
Instant::now(),
Some(Acquired(self.key.clone(), self.inner.clone())),
)))
} else {
self.h2 = Some(handshake(io));
self.poll()
}
}
Ok(Async::NotReady) => Ok(Async::NotReady),
inner.check_availability();
}
}
}
@ -277,7 +250,6 @@ enum Acquire<T> {
NotAvailable,
}
// #[derive(Debug)]
struct AvailableConnection<Io> {
io: ConnectionType<Io>,
used: Instant,
@ -285,15 +257,17 @@ struct AvailableConnection<Io> {
}
pub(crate) struct Inner<Io> {
conn_lifetime: Duration,
conn_keep_alive: Duration,
disconnect_timeout: Option<Duration>,
limit: usize,
config: ConnectorConfig,
acquired: usize,
available: HashMap<Key, VecDeque<AvailableConnection<Io>>>,
waiters: Slab<(Uri, oneshot::Sender<Result<IoConnection<Io>, ConnectError>>)>,
available: FxHashMap<Key, VecDeque<AvailableConnection<Io>>>,
waiters: Slab<
Option<(
Connect,
oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
)>,
>,
waiters_queue: IndexSet<(Key, usize)>,
task: AtomicTask,
waker: LocalWaker,
}
impl<Io> Inner<Io> {
@ -307,7 +281,79 @@ impl<Io> Inner<Io> {
fn release_waiter(&mut self, key: &Key, token: usize) {
self.waiters.remove(token);
self.waiters_queue.remove(&(key.clone(), token));
let _ = self.waiters_queue.shift_remove(&(key.clone(), token));
}
}
impl<Io> Inner<Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
/// connection is not available, wait
fn wait_for(
&mut self,
connect: Connect,
) -> (
oneshot::Receiver<Result<IoConnection<Io>, ConnectError>>,
usize,
) {
let (tx, rx) = oneshot::channel();
let key: Key = connect.uri.authority().unwrap().clone().into();
let entry = self.waiters.vacant_entry();
let token = entry.key();
entry.insert(Some((connect, tx)));
assert!(self.waiters_queue.insert((key, token)));
(rx, token)
}
fn acquire(&mut self, key: &Key, cx: &mut Context<'_>) -> Acquire<Io> {
// check limits
if self.config.limit > 0 && self.acquired >= self.config.limit {
return Acquire::NotAvailable;
}
self.reserve();
// check if open connection is available
// cleanup stale connections at the same time
if let Some(ref mut connections) = self.available.get_mut(key) {
let now = Instant::now();
while let Some(conn) = connections.pop_back() {
// check if it is still usable
if (now - conn.used) > self.config.conn_keep_alive
|| (now - conn.created) > self.config.conn_lifetime
{
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = conn.io {
actix_rt::spawn(CloseConnection::new(io, timeout))
}
}
} else {
let mut io = conn.io;
let mut buf = [0; 2];
if let ConnectionType::H1(ref mut s) = io {
match Pin::new(s).poll_read(cx, &mut buf) {
Poll::Pending => (),
Poll::Ready(Ok(n)) if n > 0 => {
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = io {
actix_rt::spawn(CloseConnection::new(
io, timeout,
))
}
}
continue;
}
_ => continue,
}
}
return Acquire::Acquired(io, conn.created);
}
}
}
Acquire::Available
}
fn release_conn(&mut self, key: &Key, io: ConnectionType<Io>, created: Instant) {
@ -320,93 +366,22 @@ impl<Io> Inner<Io> {
created,
used: Instant::now(),
});
}
}
impl<Io> Inner<Io>
where
Io: AsyncRead + AsyncWrite + 'static,
{
/// connection is not available, wait
fn wait_for(
&mut self,
connect: Uri,
) -> (
oneshot::Receiver<Result<IoConnection<Io>, ConnectError>>,
usize,
) {
let (tx, rx) = oneshot::channel();
let key: Key = connect.authority_part().unwrap().clone().into();
let entry = self.waiters.vacant_entry();
let token = entry.key();
entry.insert((connect, tx));
assert!(!self.waiters_queue.insert((key, token)));
(rx, token)
}
fn acquire(&mut self, key: &Key) -> Acquire<Io> {
// check limits
if self.limit > 0 && self.acquired >= self.limit {
return Acquire::NotAvailable;
}
self.reserve();
// check if open connection is available
// cleanup stale connections at the same time
if let Some(ref mut connections) = self.available.get_mut(key) {
let now = Instant::now();
while let Some(conn) = connections.pop_back() {
// check if it is still usable
if (now - conn.used) > self.conn_keep_alive
|| (now - conn.created) > self.conn_lifetime
{
if let Some(timeout) = self.disconnect_timeout {
if let ConnectionType::H1(io) = conn.io {
tokio_current_thread::spawn(CloseConnection::new(
io, timeout,
))
}
}
} else {
let mut io = conn.io;
let mut buf = [0; 2];
if let ConnectionType::H1(ref mut s) = io {
match s.read(&mut buf) {
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => (),
Ok(n) if n > 0 => {
if let Some(timeout) = self.disconnect_timeout {
if let ConnectionType::H1(io) = io {
tokio_current_thread::spawn(
CloseConnection::new(io, timeout),
)
}
}
continue;
}
Ok(_) | Err(_) => continue,
}
}
return Acquire::Acquired(io, conn.created);
}
}
}
Acquire::Available
self.check_availability();
}
fn release_close(&mut self, io: ConnectionType<Io>) {
self.acquired -= 1;
if let Some(timeout) = self.disconnect_timeout {
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = io {
tokio_current_thread::spawn(CloseConnection::new(io, timeout))
actix_rt::spawn(CloseConnection::new(io, timeout))
}
}
self.check_availability();
}
fn check_availibility(&self) {
if !self.waiters_queue.is_empty() && self.acquired < self.limit {
self.task.notify()
fn check_availability(&self) {
if !self.waiters_queue.is_empty() && self.acquired < self.config.limit {
self.waker.wake();
}
}
}
@ -418,39 +393,230 @@ struct CloseConnection<T> {
impl<T> CloseConnection<T>
where
T: AsyncWrite,
T: AsyncWrite + Unpin,
{
fn new(io: T, timeout: Duration) -> Self {
CloseConnection {
io,
timeout: sleep(timeout),
timeout: delay_for(timeout),
}
}
}
impl<T> Future for CloseConnection<T>
where
T: AsyncWrite,
T: AsyncWrite + Unpin,
{
type Item = ();
type Error = ();
type Output = ();
fn poll(&mut self) -> Poll<(), ()> {
match self.timeout.poll() {
Ok(Async::Ready(_)) | Err(_) => Ok(Async::Ready(())),
Ok(Async::NotReady) => match self.io.shutdown() {
Ok(Async::Ready(_)) | Err(_) => Ok(Async::Ready(())),
Ok(Async::NotReady) => Ok(Async::NotReady),
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<()> {
let this = self.get_mut();
match Pin::new(&mut this.timeout).poll(cx) {
Poll::Ready(_) => Poll::Ready(()),
Poll::Pending => match Pin::new(&mut this.io).poll_shutdown(cx) {
Poll::Ready(_) => Poll::Ready(()),
Poll::Pending => Poll::Pending,
},
}
}
}
#[pin_project]
struct ConnectorPoolSupport<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
connector: T,
inner: Rc<RefCell<Inner<Io>>>,
}
impl<T, Io> Future for ConnectorPoolSupport<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>,
T::Future: 'static,
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.project();
if Rc::strong_count(this.inner) == 1 {
// If we are the last copy of Inner<Io>, it means the ConnectionPool is already gone
// and we are safe to exit.
return Poll::Ready(());
}
let mut inner = this.inner.borrow_mut();
inner.waker.register(cx.waker());
// check waiters
loop {
let (key, token) = {
if let Some((key, token)) = inner.waiters_queue.get_index(0) {
(key.clone(), *token)
} else {
break;
}
};
if inner.waiters.get(token).unwrap().is_none() {
continue;
}
match inner.acquire(&key, cx) {
Acquire::NotAvailable => break,
Acquire::Acquired(io, created) => {
let tx = inner.waiters.get_mut(token).unwrap().take().unwrap().1;
if let Err(conn) = tx.send(Ok(IoConnection::new(
io,
created,
Some(Acquired(key.clone(), Some(this.inner.clone()))),
))) {
let (io, created) = conn.unwrap().into_inner();
inner.release_conn(&key, io, created);
}
}
Acquire::Available => {
let (connect, tx) =
inner.waiters.get_mut(token).unwrap().take().unwrap();
OpenWaitingConnection::spawn(
key.clone(),
tx,
this.inner.clone(),
this.connector.call(connect),
inner.config.clone(),
);
}
}
let _ = inner.waiters_queue.swap_remove_index(0);
}
Poll::Pending
}
}
#[pin_project::pin_project(PinnedDrop)]
struct OpenWaitingConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
#[pin]
fut: F,
key: Key,
h2: Option<
LocalBoxFuture<
'static,
Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>,
>,
>,
rx: Option<oneshot::Sender<Result<IoConnection<Io>, ConnectError>>>,
inner: Option<Rc<RefCell<Inner<Io>>>>,
config: ConnectorConfig,
}
impl<F, Io> OpenWaitingConnection<F, Io>
where
F: Future<Output = Result<(Io, Protocol), ConnectError>> + 'static,
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn spawn(
key: Key,
rx: oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
inner: Rc<RefCell<Inner<Io>>>,
fut: F,
config: ConnectorConfig,
) {
actix_rt::spawn(OpenWaitingConnection {
key,
fut,
h2: None,
rx: Some(rx),
inner: Some(inner),
config,
})
}
}
#[pin_project::pinned_drop]
impl<F, Io> PinnedDrop for OpenWaitingConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(self: Pin<&mut Self>) {
if let Some(inner) = self.project().inner.take() {
let mut inner = inner.as_ref().borrow_mut();
inner.release();
inner.check_availability();
}
}
}
impl<F, Io> Future for OpenWaitingConnection<F, Io>
where
F: Future<Output = Result<(Io, Protocol), ConnectError>>,
Io: AsyncRead + AsyncWrite + Unpin,
{
type Output = ();
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project();
if let Some(ref mut h2) = this.h2 {
return match Pin::new(h2).poll(cx) {
Poll::Ready(Ok((snd, connection))) => {
actix_rt::spawn(connection.map(|_| ()));
let rx = this.rx.take().unwrap();
let _ = rx.send(Ok(IoConnection::new(
ConnectionType::H2(snd),
Instant::now(),
Some(Acquired(this.key.clone(), this.inner.take())),
)));
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
Poll::Ready(Err(err)) => {
let _ = this.inner.take();
if let Some(rx) = this.rx.take() {
let _ = rx.send(Err(ConnectError::H2(err)));
}
Poll::Ready(())
}
};
}
match this.fut.poll(cx) {
Poll::Ready(Err(err)) => {
let _ = this.inner.take();
if let Some(rx) = this.rx.take() {
let _ = rx.send(Err(err));
}
Poll::Ready(())
}
Poll::Ready(Ok((io, proto))) => {
if proto == Protocol::Http1 {
let rx = this.rx.take().unwrap();
let _ = rx.send(Ok(IoConnection::new(
ConnectionType::H1(io),
Instant::now(),
Some(Acquired(this.key.clone(), this.inner.take())),
)));
Poll::Ready(())
} else {
*this.h2 = Some(handshake(io, this.config).boxed_local());
self.poll(cx)
}
}
Poll::Pending => Poll::Pending,
}
}
}
pub(crate) struct Acquired<T>(Key, Option<Rc<RefCell<Inner<T>>>>);
impl<T> Acquired<T>
where
T: AsyncRead + AsyncWrite + 'static,
T: AsyncRead + AsyncWrite + Unpin + 'static,
{
pub(crate) fn close(&mut self, conn: IoConnection<T>) {
if let Some(inner) = self.1.take() {

View File

@ -0,0 +1,40 @@
use std::cell::RefCell;
use std::rc::Rc;
use std::task::{Context, Poll};
use actix_service::Service;
#[doc(hidden)]
/// Service that allows a non-`Clone` service to be used as a service with a `Clone` impl
///
/// # Panics
/// CloneableService might panic with some creative use of thread local storage.
/// See https://github.com/actix/actix-web/issues/1295 for example
pub(crate) struct CloneableService<T: Service>(Rc<RefCell<T>>);
impl<T: Service> CloneableService<T> {
pub(crate) fn new(service: T) -> Self {
Self(Rc::new(RefCell::new(service)))
}
}
impl<T: Service> Clone for CloneableService<T> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl<T: Service> Service for CloneableService<T> {
type Request = T::Request;
type Response = T::Response;
type Error = T::Error;
type Future = T::Future;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.0.borrow_mut().poll_ready(cx)
}
fn call(&mut self, req: T::Request) -> Self::Future {
self.0.borrow_mut().call(req)
}
}
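A rough illustration of why this wrapper exists (toy service, not from the diff): a stateful, non-`Clone` service can be handed out to several owners on the same thread, all of which drive the single shared instance.
use futures_util::future::{ready, Ready};
// Toy service: every cloned handle must observe the same counter, which the
// Rc<RefCell<_>> inside CloneableService provides.
struct Counter(u64);
impl Service for Counter {
    type Request = ();
    type Response = u64;
    type Error = ();
    type Future = Ready<Result<u64, ()>>;
    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }
    fn call(&mut self, _: ()) -> Self::Future {
        self.0 += 1;
        ready(Ok(self.0))
    }
}
// let svc = CloneableService::new(Counter(0));
// let svc2 = svc.clone(); // both handles increment the same counter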

View File

@ -1,13 +1,13 @@
use std::cell::UnsafeCell;
use std::fmt;
use std::cell::Cell;
use std::fmt::Write;
use std::rc::Rc;
use std::time::{Duration, Instant};
use std::time::Duration;
use std::{fmt, net};
use actix_rt::time::{delay_for, delay_until, Delay, Instant};
use bytes::BytesMut;
use futures::{future, Future};
use time;
use tokio_timer::{sleep, Delay};
use futures_util::{future, FutureExt};
use time::OffsetDateTime;
// "Sun, 06 Nov 1994 08:49:37 GMT".len()
const DATE_VALUE_LENGTH: usize = 29;
@ -17,7 +17,7 @@ const DATE_VALUE_LENGTH: usize = 29;
pub enum KeepAlive {
/// Keep alive in seconds
Timeout(usize),
/// Relay on OS to shutdown tcp connection
/// Rely on OS to shutdown tcp connection
Os,
/// Disabled
Disabled,
@ -47,6 +47,8 @@ struct Inner {
client_timeout: u64,
client_disconnect: u64,
ka_enabled: bool,
secure: bool,
local_addr: Option<std::net::SocketAddr>,
timer: DateService,
}
@ -58,7 +60,7 @@ impl Clone for ServiceConfig {
impl Default for ServiceConfig {
fn default() -> Self {
Self::new(KeepAlive::Timeout(5), 0, 0)
Self::new(KeepAlive::Timeout(5), 0, 0, false, None)
}
}
@ -68,6 +70,8 @@ impl ServiceConfig {
keep_alive: KeepAlive,
client_timeout: u64,
client_disconnect: u64,
secure: bool,
local_addr: Option<net::SocketAddr>,
) -> ServiceConfig {
let (keep_alive, ka_enabled) = match keep_alive {
KeepAlive::Timeout(val) => (val as u64, true),
@ -85,10 +89,24 @@ impl ServiceConfig {
ka_enabled,
client_timeout,
client_disconnect,
secure,
local_addr,
timer: DateService::new(),
}))
}
#[inline]
/// Returns true if the connection is secure (HTTPS)
pub fn secure(&self) -> bool {
self.0.secure
}
#[inline]
/// Returns the local address that this server is bound to.
pub fn local_addr(&self) -> Option<net::SocketAddr> {
self.0.local_addr
}
#[inline]
/// Keep alive duration if configured.
pub fn keep_alive(&self) -> Option<Duration> {
@ -96,7 +114,7 @@ impl ServiceConfig {
}
#[inline]
/// Return state of connection keep-alive funcitonality
/// Return state of connection keep-alive functionality
pub fn keep_alive_enabled(&self) -> bool {
self.0.ka_enabled
}
@ -104,10 +122,10 @@ impl ServiceConfig {
#[inline]
/// Client timeout for first request.
pub fn client_timer(&self) -> Option<Delay> {
let delay = self.0.client_timeout;
if delay != 0 {
Some(Delay::new(
self.0.timer.now() + Duration::from_millis(delay),
let delay_time = self.0.client_timeout;
if delay_time != 0 {
Some(delay_until(
self.0.timer.now() + Duration::from_millis(delay_time),
))
} else {
None
@ -138,7 +156,7 @@ impl ServiceConfig {
/// Return keep-alive timer delay is configured.
pub fn keep_alive_timer(&self) -> Option<Delay> {
if let Some(ka) = self.0.keep_alive {
Some(Delay::new(self.0.timer.now() + ka))
Some(delay_until(self.0.timer.now() + ka))
} else {
None
}
@ -158,19 +176,25 @@ impl ServiceConfig {
self.0.timer.now()
}
pub(crate) fn set_date(&self, dst: &mut BytesMut) {
#[doc(hidden)]
pub fn set_date(&self, dst: &mut BytesMut) {
let mut buf: [u8; 39] = [0; 39];
buf[..6].copy_from_slice(b"date: ");
buf[6..35].copy_from_slice(&self.0.timer.date().bytes);
self.0
.timer
.set_date(|date| buf[6..35].copy_from_slice(&date.bytes));
buf[35..].copy_from_slice(b"\r\n\r\n");
dst.extend_from_slice(&buf);
}
pub(crate) fn set_date_header(&self, dst: &mut BytesMut) {
dst.extend_from_slice(&self.0.timer.date().bytes);
self.0
.timer
.set_date(|date| dst.extend_from_slice(&date.bytes));
}
}
#[derive(Copy, Clone)]
struct Date {
bytes: [u8; DATE_VALUE_LENGTH],
pos: usize,
@ -185,9 +209,15 @@ impl Date {
date.update();
date
}
fn update(&mut self) {
self.pos = 0;
write!(self, "{}", time::at_utc(time::get_time()).rfc822()).unwrap();
write!(
self,
"{}",
OffsetDateTime::now_utc().format("%a, %d %b %Y %H:%M:%S GMT")
)
.unwrap();
}
}
@ -204,28 +234,24 @@ impl fmt::Write for Date {
struct DateService(Rc<DateServiceInner>);
struct DateServiceInner {
current: UnsafeCell<Option<(Date, Instant)>>,
current: Cell<Option<(Date, Instant)>>,
}
impl DateServiceInner {
fn new() -> Self {
DateServiceInner {
current: UnsafeCell::new(None),
current: Cell::new(None),
}
}
fn get_ref(&self) -> &Option<(Date, Instant)> {
unsafe { &*self.current.get() }
}
fn reset(&self) {
unsafe { (&mut *self.current.get()).take() };
self.current.take();
}
fn update(&self) {
let now = Instant::now();
let date = Date::new();
*(unsafe { &mut *self.current.get() }) = Some((date, now));
self.current.set(Some((date, now)));
}
}
@ -235,56 +261,55 @@ impl DateService {
}
fn check_date(&self) {
if self.0.get_ref().is_none() {
if self.0.current.get().is_none() {
self.0.update();
// periodic date update
let s = self.clone();
tokio_current_thread::spawn(sleep(Duration::from_millis(500)).then(
move |_| {
s.0.reset();
future::ok(())
},
));
actix_rt::spawn(delay_for(Duration::from_millis(500)).then(move |_| {
s.0.reset();
future::ready(())
}));
}
}
fn now(&self) -> Instant {
self.check_date();
self.0.get_ref().as_ref().unwrap().1
self.0.current.get().unwrap().1
}
fn date(&self) -> &Date {
fn set_date<F: FnMut(&Date)>(&self, mut f: F) {
self.check_date();
let item = self.0.get_ref().as_ref().unwrap();
&item.0
f(&self.0.current.get().unwrap().0);
}
}
#[cfg(test)]
mod tests {
use super::*;
use actix_rt::System;
use futures::future;
// Test modifying the date from within the closure
// passed to `set_date`
#[test]
fn test_evil_date() {
let service = DateService::new();
// Make sure that `check_date` doesn't try to spawn a task
service.0.update();
service.set_date(|_| service.0.reset());
}
#[test]
fn test_date_len() {
assert_eq!(DATE_VALUE_LENGTH, "Sun, 06 Nov 1994 08:49:37 GMT".len());
}
#[test]
fn test_date() {
let mut rt = System::new("test");
let _ = rt.block_on(future::lazy(|| {
let settings = ServiceConfig::new(KeepAlive::Os, 0, 0);
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf1);
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf2);
assert_eq!(buf1, buf2);
future::ok::<_, ()>(())
}));
#[actix_rt::test]
async fn test_date() {
let settings = ServiceConfig::new(KeepAlive::Os, 0, 0, false, None);
let mut buf1 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf1);
let mut buf2 = BytesMut::with_capacity(DATE_VALUE_LENGTH + 10);
settings.set_date(&mut buf2);
assert_eq!(buf1, buf2);
}
}
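For illustration alongside the test above, a sketch of constructing a config with the two new arguments (`secure` and `local_addr`); the address is of course arbitrary.
use std::net::SocketAddr;
// Hypothetical: 5s keep-alive, 5s client timeout, no disconnect timeout,
// plain-text (non-TLS) listener bound to 127.0.0.1:8080.
fn example_config() -> ServiceConfig {
    let addr: SocketAddr = "127.0.0.1:8080".parse().unwrap();
    ServiceConfig::new(KeepAlive::Timeout(5), 5000, 0, false, Some(addr))
}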

View File

@ -1,17 +1,20 @@
use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_threadpool::{run, CpuFuture};
#[cfg(feature = "brotli")]
use brotli2::write::BrotliDecoder;
use bytes::Bytes;
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
use flate2::write::{GzDecoder, ZlibDecoder};
use futures::{try_ready, Async, Future, Poll, Stream};
use futures_core::{ready, Stream};
use super::Writer;
use crate::error::PayloadError;
use crate::http::header::{ContentEncoding, HeaderMap, CONTENT_ENCODING};
const INPLACE: usize = 2049;
pub struct Decoder<S> {
decoder: Option<ContentDecoder>,
stream: S,
@ -21,21 +24,18 @@ pub struct Decoder<S> {
impl<S> Decoder<S>
where
S: Stream<Item = Bytes, Error = PayloadError>,
S: Stream<Item = Result<Bytes, PayloadError>>,
{
/// Construct a decoder.
#[inline]
pub fn new(stream: S, encoding: ContentEncoding) -> Decoder<S> {
let decoder = match encoding {
#[cfg(feature = "brotli")]
ContentEncoding::Br => Some(ContentDecoder::Br(Box::new(
BrotliDecoder::new(Writer::new()),
))),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoding::Deflate => Some(ContentDecoder::Deflate(Box::new(
ZlibDecoder::new(Writer::new()),
))),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoding::Gzip => Some(ContentDecoder::Gzip(Box::new(
GzDecoder::new(Writer::new()),
))),
@ -53,7 +53,7 @@ where
#[inline]
pub fn from_headers(stream: S, headers: &HeaderMap) -> Decoder<S> {
// check content-encoding
let encoding = if let Some(enc) = headers.get(CONTENT_ENCODING) {
let encoding = if let Some(enc) = headers.get(&CONTENT_ENCODING) {
if let Ok(enc) = enc.to_str() {
ContentEncoding::from(enc)
} else {
@ -69,74 +69,83 @@ where
impl<S> Stream for Decoder<S>
where
S: Stream<Item = Bytes, Error = PayloadError>,
S: Stream<Item = Result<Bytes, PayloadError>> + Unpin,
{
type Item = Bytes;
type Error = PayloadError;
type Item = Result<Bytes, PayloadError>;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
fn poll_next(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
loop {
if let Some(ref mut fut) = self.fut {
let (chunk, decoder) = try_ready!(fut.poll());
let (chunk, decoder) = match ready!(Pin::new(fut).poll(cx)) {
Ok(item) => item,
Err(e) => return Poll::Ready(Some(Err(e.into()))),
};
self.decoder = Some(decoder);
self.fut.take();
if let Some(chunk) = chunk {
return Ok(Async::Ready(Some(chunk)));
return Poll::Ready(Some(Ok(chunk)));
}
}
if self.eof {
return Ok(Async::Ready(None));
return Poll::Ready(None);
}
match self.stream.poll()? {
Async::Ready(Some(chunk)) => {
match Pin::new(&mut self.stream).poll_next(cx) {
Poll::Ready(Some(Err(err))) => return Poll::Ready(Some(Err(err))),
Poll::Ready(Some(Ok(chunk))) => {
if let Some(mut decoder) = self.decoder.take() {
self.fut = Some(run(move || {
if chunk.len() < INPLACE {
let chunk = decoder.feed_data(chunk)?;
Ok((chunk, decoder))
}));
self.decoder = Some(decoder);
if let Some(chunk) = chunk {
return Poll::Ready(Some(Ok(chunk)));
}
} else {
self.fut = Some(run(move || {
let chunk = decoder.feed_data(chunk)?;
Ok((chunk, decoder))
}));
}
continue;
} else {
return Ok(Async::Ready(Some(chunk)));
return Poll::Ready(Some(Ok(chunk)));
}
}
Async::Ready(None) => {
Poll::Ready(None) => {
self.eof = true;
if let Some(mut decoder) = self.decoder.take() {
self.fut = Some(run(move || {
let chunk = decoder.feed_eof()?;
Ok((chunk, decoder))
}));
continue;
return if let Some(mut decoder) = self.decoder.take() {
match decoder.feed_eof() {
Ok(Some(res)) => Poll::Ready(Some(Ok(res))),
Ok(None) => Poll::Ready(None),
Err(err) => Poll::Ready(Some(Err(err.into()))),
}
} else {
return Ok(Async::Ready(None));
Poll::Ready(None)
};
}
Async::NotReady => break,
Poll::Pending => break,
}
}
Ok(Async::NotReady)
Poll::Pending
}
}
enum ContentDecoder {
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
Deflate(Box<ZlibDecoder<Writer>>),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
Gzip(Box<GzDecoder<Writer>>),
#[cfg(feature = "brotli")]
Br(Box<BrotliDecoder<Writer>>),
}
impl ContentDecoder {
#[allow(unreachable_patterns)]
fn feed_eof(&mut self) -> io::Result<Option<Bytes>> {
match self {
#[cfg(feature = "brotli")]
ContentDecoder::Br(ref mut decoder) => match decoder.finish() {
Ok(mut writer) => {
let b = writer.take();
ContentDecoder::Br(ref mut decoder) => match decoder.flush() {
Ok(()) => {
let b = decoder.get_mut().take();
if !b.is_empty() {
Ok(Some(b))
} else {
@ -145,7 +154,6 @@ impl ContentDecoder {
}
Err(e) => Err(e),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentDecoder::Gzip(ref mut decoder) => match decoder.try_finish() {
Ok(_) => {
let b = decoder.get_mut().take();
@ -157,7 +165,6 @@ impl ContentDecoder {
}
Err(e) => Err(e),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentDecoder::Deflate(ref mut decoder) => match decoder.try_finish() {
Ok(_) => {
let b = decoder.get_mut().take();
@ -169,14 +176,11 @@ impl ContentDecoder {
}
Err(e) => Err(e),
},
_ => Ok(None),
}
}
#[allow(unreachable_patterns)]
fn feed_data(&mut self, data: Bytes) -> io::Result<Option<Bytes>> {
match self {
#[cfg(feature = "brotli")]
ContentDecoder::Br(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
@ -189,7 +193,6 @@ impl ContentDecoder {
}
Err(e) => Err(e),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentDecoder::Gzip(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
@ -202,7 +205,6 @@ impl ContentDecoder {
}
Err(e) => Err(e),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentDecoder::Deflate(ref mut decoder) => match decoder.write_all(&data) {
Ok(_) => {
decoder.flush()?;
@ -215,7 +217,6 @@ impl ContentDecoder {
}
Err(e) => Err(e),
},
_ => Ok(Some(data)),
}
}
}
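A hedged usage sketch (the wrapper function is illustrative): any body stream of `Result<Bytes, PayloadError>` items can be wrapped so that the response's `Content-Encoding` header drives decompression, passing data through untouched for identity encodings.
// Illustrative only: select Br / Gzip / Deflate from the response headers.
fn decompressed<S>(payload: S, headers: &HeaderMap) -> Decoder<S>
where
    S: Stream<Item = Result<Bytes, PayloadError>> + Unpin,
{
    Decoder::from_headers(payload, headers)
}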

View File

@ -1,24 +1,32 @@
//! Stream encoder
use std::future::Future;
use std::io::{self, Write};
use std::pin::Pin;
use std::task::{Context, Poll};
use bytes::Bytes;
use futures::{Async, Poll};
#[cfg(feature = "brotli")]
use actix_threadpool::{run, CpuFuture};
use brotli2::write::BrotliEncoder;
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
use bytes::Bytes;
use flate2::write::{GzEncoder, ZlibEncoder};
use futures_core::ready;
use pin_project::pin_project;
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use crate::http::header::{ContentEncoding, CONTENT_ENCODING};
use crate::http::{HeaderValue, HttpTryFrom, StatusCode};
use crate::http::{HeaderValue, StatusCode};
use crate::{Error, ResponseHead};
use super::Writer;
const INPLACE: usize = 1024;
#[pin_project]
pub struct Encoder<B> {
eof: bool,
#[pin]
body: EncoderBody<B>,
encoder: Option<ContentEncoder>,
fut: Option<CpuFuture<ContentEncoder, io::Error>>,
}
impl<B: MessageBody> Encoder<B> {
@ -27,108 +35,154 @@ impl<B: MessageBody> Encoder<B> {
head: &mut ResponseHead,
body: ResponseBody<B>,
) -> ResponseBody<Encoder<B>> {
let has_ce = head.headers().contains_key(CONTENT_ENCODING);
match body {
ResponseBody::Other(b) => match b {
Body::None => ResponseBody::Other(Body::None),
Body::Empty => ResponseBody::Other(Body::Empty),
Body::Bytes(buf) => {
if !(has_ce
|| encoding == ContentEncoding::Identity
|| encoding == ContentEncoding::Auto)
{
let mut enc = ContentEncoder::encoder(encoding).unwrap();
let can_encode = !(head.headers().contains_key(&CONTENT_ENCODING)
|| head.status == StatusCode::SWITCHING_PROTOCOLS
|| head.status == StatusCode::NO_CONTENT
|| encoding == ContentEncoding::Identity
|| encoding == ContentEncoding::Auto);
// TODO return error!
let _ = enc.write(buf.as_ref());
let body = enc.finish().unwrap();
update_head(encoding, head);
ResponseBody::Other(Body::Bytes(body))
let body = match body {
ResponseBody::Other(b) => match b {
Body::None => return ResponseBody::Other(Body::None),
Body::Empty => return ResponseBody::Other(Body::Empty),
Body::Bytes(buf) => {
if can_encode {
EncoderBody::Bytes(buf)
} else {
ResponseBody::Other(Body::Bytes(buf))
}
}
Body::Message(stream) => {
if has_ce || head.status == StatusCode::SWITCHING_PROTOCOLS {
ResponseBody::Body(Encoder {
body: EncoderBody::Other(stream),
encoder: None,
})
} else {
update_head(encoding, head);
head.no_chunking(false);
ResponseBody::Body(Encoder {
body: EncoderBody::Other(stream),
encoder: ContentEncoder::encoder(encoding),
})
return ResponseBody::Other(Body::Bytes(buf));
}
}
Body::Message(stream) => EncoderBody::BoxedStream(stream),
},
ResponseBody::Body(stream) => {
if has_ce || head.status == StatusCode::SWITCHING_PROTOCOLS {
ResponseBody::Body(Encoder {
body: EncoderBody::Body(stream),
encoder: None,
})
ResponseBody::Body(stream) => EncoderBody::Stream(stream),
};
if can_encode {
// Modify response body only if encoder is not None
if let Some(enc) = ContentEncoder::encoder(encoding) {
update_head(encoding, head);
head.no_chunking(false);
return ResponseBody::Body(Encoder {
body,
eof: false,
fut: None,
encoder: Some(enc),
});
}
}
ResponseBody::Body(Encoder {
body,
eof: false,
fut: None,
encoder: None,
})
}
}
#[pin_project(project = EncoderBodyProj)]
enum EncoderBody<B> {
Bytes(Bytes),
Stream(#[pin] B),
BoxedStream(Box<dyn MessageBody + Unpin>),
}
impl<B: MessageBody> MessageBody for EncoderBody<B> {
fn size(&self) -> BodySize {
match self {
EncoderBody::Bytes(ref b) => b.size(),
EncoderBody::Stream(ref b) => b.size(),
EncoderBody::BoxedStream(ref b) => b.size(),
}
}
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
match self.project() {
EncoderBodyProj::Bytes(b) => {
if b.is_empty() {
Poll::Ready(None)
} else {
update_head(encoding, head);
head.no_chunking(false);
ResponseBody::Body(Encoder {
body: EncoderBody::Body(stream),
encoder: ContentEncoder::encoder(encoding),
})
Poll::Ready(Some(Ok(std::mem::take(b))))
}
}
EncoderBodyProj::Stream(b) => b.poll_next(cx),
EncoderBodyProj::BoxedStream(ref mut b) => {
Pin::new(b.as_mut()).poll_next(cx)
}
}
}
}
enum EncoderBody<B> {
Body(B),
Other(Box<dyn MessageBody>),
}
impl<B: MessageBody> MessageBody for Encoder<B> {
fn length(&self) -> BodySize {
fn size(&self) -> BodySize {
if self.encoder.is_none() {
match self.body {
EncoderBody::Body(ref b) => b.length(),
EncoderBody::Other(ref b) => b.length(),
}
self.body.size()
} else {
BodySize::Stream
}
}
fn poll_next(&mut self) -> Poll<Option<Bytes>, Error> {
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut this = self.project();
loop {
let result = match self.body {
EncoderBody::Body(ref mut b) => b.poll_next()?,
EncoderBody::Other(ref mut b) => b.poll_next()?,
};
if *this.eof {
return Poll::Ready(None);
}
if let Some(ref mut fut) = this.fut {
let mut encoder = match ready!(Pin::new(fut).poll(cx)) {
Ok(item) => item,
Err(e) => return Poll::Ready(Some(Err(e.into()))),
};
let chunk = encoder.take();
*this.encoder = Some(encoder);
this.fut.take();
if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk)));
}
}
let result = this.body.as_mut().poll_next(cx);
match result {
Async::NotReady => return Ok(Async::NotReady),
Async::Ready(Some(chunk)) => {
if let Some(ref mut encoder) = self.encoder {
if encoder.write(&chunk)? {
return Ok(Async::Ready(Some(encoder.take())));
Poll::Ready(Some(Ok(chunk))) => {
if let Some(mut encoder) = this.encoder.take() {
if chunk.len() < INPLACE {
encoder.write(&chunk)?;
let chunk = encoder.take();
*this.encoder = Some(encoder);
if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk)));
}
} else {
*this.fut = Some(run(move || {
encoder.write(&chunk)?;
Ok(encoder)
}));
}
} else {
return Ok(Async::Ready(Some(chunk)));
return Poll::Ready(Some(Ok(chunk)));
}
}
Async::Ready(None) => {
if let Some(encoder) = self.encoder.take() {
Poll::Ready(None) => {
if let Some(encoder) = this.encoder.take() {
let chunk = encoder.finish()?;
if chunk.is_empty() {
return Ok(Async::Ready(None));
return Poll::Ready(None);
} else {
return Ok(Async::Ready(Some(chunk)));
*this.eof = true;
return Poll::Ready(Some(Ok(chunk)));
}
} else {
return Ok(Async::Ready(None));
return Poll::Ready(None);
}
}
val => return val,
}
}
}
@ -137,33 +191,27 @@ impl<B: MessageBody> MessageBody for Encoder<B> {
fn update_head(encoding: ContentEncoding, head: &mut ResponseHead) {
head.headers_mut().insert(
CONTENT_ENCODING,
HeaderValue::try_from(Bytes::from_static(encoding.as_str().as_bytes())).unwrap(),
HeaderValue::from_static(encoding.as_str()),
);
}
enum ContentEncoder {
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
Deflate(ZlibEncoder<Writer>),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
Gzip(GzEncoder<Writer>),
#[cfg(feature = "brotli")]
Br(BrotliEncoder<Writer>),
}
impl ContentEncoder {
fn encoder(encoding: ContentEncoding) -> Option<Self> {
match encoding {
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoding::Deflate => Some(ContentEncoder::Deflate(ZlibEncoder::new(
Writer::new(),
flate2::Compression::fast(),
))),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoding::Gzip => Some(ContentEncoder::Gzip(GzEncoder::new(
Writer::new(),
flate2::Compression::fast(),
))),
#[cfg(feature = "brotli")]
ContentEncoding::Br => {
Some(ContentEncoder::Br(BrotliEncoder::new(Writer::new(), 3)))
}
@ -174,28 +222,22 @@ impl ContentEncoder {
#[inline]
pub(crate) fn take(&mut self) -> Bytes {
match *self {
#[cfg(feature = "brotli")]
ContentEncoder::Br(ref mut encoder) => encoder.get_mut().take(),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Deflate(ref mut encoder) => encoder.get_mut().take(),
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Gzip(ref mut encoder) => encoder.get_mut().take(),
}
}
fn finish(self) -> Result<Bytes, io::Error> {
match self {
#[cfg(feature = "brotli")]
ContentEncoder::Br(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Gzip(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Deflate(encoder) => match encoder.finish() {
Ok(writer) => Ok(writer.buf.freeze()),
Err(err) => Err(err),
@ -203,27 +245,24 @@ impl ContentEncoder {
}
}
fn write(&mut self, data: &[u8]) -> Result<bool, io::Error> {
fn write(&mut self, data: &[u8]) -> Result<(), io::Error> {
match *self {
#[cfg(feature = "brotli")]
ContentEncoder::Br(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(!encoder.get_ref().buf.is_empty()),
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding br encoding: {}", err);
Err(err)
}
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Gzip(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(!encoder.get_ref().buf.is_empty()),
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding gzip encoding: {}", err);
Err(err)
}
},
#[cfg(any(feature = "flate2-zlib", feature = "flate2-rust"))]
ContentEncoder::Deflate(ref mut encoder) => match encoder.write_all(data) {
Ok(_) => Ok(!encoder.get_ref().buf.is_empty()),
Ok(_) => Ok(()),
Err(err) => {
trace!("Error decoding deflate encoding: {}", err);
Err(err)

View File

@ -19,8 +19,9 @@ impl Writer {
buf: BytesMut::with_capacity(8192),
}
}
fn take(&mut self) -> Bytes {
self.buf.take().freeze()
self.buf.split().freeze()
}
}
@ -29,6 +30,7 @@ impl io::Write for Writer {
self.buf.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}

View File

@ -1,30 +1,29 @@
//! Error and Result module
use std::cell::RefCell;
use std::io::Write;
use std::str::Utf8Error;
use std::string::FromUtf8Error;
use std::{fmt, io, result};
// use actix::MailboxError;
use actix_codec::{Decoder, Encoder};
pub use actix_threadpool::BlockingError;
use actix_utils::dispatcher::DispatcherError as FramedDispatcherError;
use actix_utils::timeout::TimeoutError;
#[cfg(feature = "cookies")]
use cookie;
use bytes::BytesMut;
use derive_more::{Display, From};
use futures::Canceled;
pub use futures_channel::oneshot::Canceled;
use http::uri::InvalidUri;
use http::{header, Error as HttpError, StatusCode};
use httparse;
use serde::de::value::Error as DeError;
use serde_json::error::Error as JsonError;
use serde_urlencoded::ser::Error as FormError;
use tokio_timer::Error as TimerError;
// re-export for convinience
#[cfg(feature = "cookies")]
pub use cookie::ParseError as CookieParseError;
// re-export for convenience
use crate::body::Body;
use crate::response::Response;
pub use crate::cookie::ParseError as CookieParseError;
use crate::helpers::Writer;
use crate::response::{Response, ResponseBuilder};
/// A specialized [`Result`](https://doc.rust-lang.org/std/result/enum.Result.html)
/// for actix web operations
@ -36,7 +35,7 @@ pub type Result<T, E = Error> = result::Result<T, E>;
/// General purpose actix web error.
///
/// An actix web error is used to carry errors from `failure` or `std::error`
/// An actix web error is used to carry errors from `std::error`
/// through actix in a convenient way. It can be created through
/// converting errors with `into()`.
///
@ -45,42 +44,82 @@ pub type Result<T, E = Error> = result::Result<T, E>;
/// if you have access to an actix `Error` you can always get a
/// `ResponseError` reference from it.
pub struct Error {
cause: Box<ResponseError>,
cause: Box<dyn ResponseError>,
}
impl Error {
/// Returns the reference to the underlying `ResponseError`.
pub fn as_response_error(&self) -> &ResponseError {
pub fn as_response_error(&self) -> &dyn ResponseError {
self.cause.as_ref()
}
/// Converts the error to a response instance and sets the error message as the response body
pub fn response_with_message(self) -> Response {
let message = format!("{}", self);
let resp: Response = self.into();
resp.set_body(Body::from(message))
/// Similar to `as_response_error` but downcasts.
pub fn as_error<T: ResponseError + 'static>(&self) -> Option<&T> {
ResponseError::downcast_ref(self.cause.as_ref())
}
}
/// Error that can be converted to `Response`
pub trait ResponseError: fmt::Debug + fmt::Display {
/// Response's status code
///
/// Internal server error is generated by default.
fn status_code(&self) -> StatusCode {
StatusCode::INTERNAL_SERVER_ERROR
}
/// Create response for error
///
/// Internal server error is generated by default.
fn error_response(&self) -> Response {
Response::new(StatusCode::INTERNAL_SERVER_ERROR)
let mut resp = Response::new(self.status_code());
let mut buf = BytesMut::new();
let _ = write!(Writer(&mut buf), "{}", self);
resp.headers_mut().insert(
header::CONTENT_TYPE,
header::HeaderValue::from_static("text/plain; charset=utf-8"),
);
resp.set_body(Body::from(buf))
}
downcast_get_type_id!();
}
downcast!(ResponseError);
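In practice the trait is usually implemented on an application error type (hypothetical example below): derive `Debug`/`Display`, override `status_code`, and let the default `error_response` above render the plain-text body.
// Hypothetical application error; derive_more::Display is already imported
// in this module.
#[derive(Debug, Display)]
#[display(fmt = "validation failed on field `{}`", field)]
struct ValidationError {
    field: &'static str,
}
impl ResponseError for ValidationError {
    fn status_code(&self) -> StatusCode {
        StatusCode::BAD_REQUEST
    }
}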
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.cause, f)
}
}
impl fmt::Debug for Error {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "{:?}\n", &self.cause)
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{:?}", &self.cause)
}
}
impl std::error::Error for Error {
fn cause(&self) -> Option<&dyn std::error::Error> {
None
}
fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
None
}
}
impl From<()> for Error {
fn from(_: ()) -> Self {
Error::from(UnitError)
}
}
impl From<std::convert::Infallible> for Error {
fn from(_: std::convert::Infallible) -> Self {
// `std::convert::Infallible` indicates an error
// that will never happen
unreachable!()
}
}
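// Sketch of how the conversion is normally used in practice: io::Error
// implements ResponseError (see further down), so the blanket From impl below
// lets `?` wrap it into the crate-level Error of the Result alias defined above.
fn read_config(path: &str) -> Result<String> {
    let data = std::fs::read_to_string(path)?;
    Ok(data)
}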
@ -100,39 +139,68 @@ impl<T: ResponseError + 'static> From<T> for Error {
}
}
/// Convert Response to a Error
impl From<Response> for Error {
fn from(res: Response) -> Error {
InternalError::from_response("", res).into()
}
}
/// Convert ResponseBuilder to a Error
impl From<ResponseBuilder> for Error {
fn from(mut res: ResponseBuilder) -> Error {
InternalError::from_response("", res.finish()).into()
}
}
/// Return `GATEWAY_TIMEOUT` for `TimeoutError`
impl<E: ResponseError> ResponseError for TimeoutError<E> {
fn error_response(&self) -> Response {
fn status_code(&self) -> StatusCode {
match self {
TimeoutError::Service(e) => e.error_response(),
TimeoutError::Timeout => Response::new(StatusCode::GATEWAY_TIMEOUT),
TimeoutError::Service(e) => e.status_code(),
TimeoutError::Timeout => StatusCode::GATEWAY_TIMEOUT,
}
}
}
#[derive(Debug, Display)]
#[display(fmt = "UnknownError")]
struct UnitError;
/// `InternalServerError` for `UnitError`
impl ResponseError for UnitError {}
/// `InternalServerError` for `JsonError`
impl ResponseError for JsonError {}
/// `InternalServerError` for `FormError`
impl ResponseError for FormError {}
/// `InternalServerError` for `TimerError`
impl ResponseError for TimerError {}
#[cfg(feature = "openssl")]
/// `InternalServerError` for `openssl::ssl::Error`
impl ResponseError for actix_connect::ssl::openssl::SslError {}
#[cfg(feature = "openssl")]
/// `InternalServerError` for `openssl::ssl::HandshakeError`
impl<T: std::fmt::Debug> ResponseError for actix_tls::openssl::HandshakeError<T> {}
/// Return `BAD_REQUEST` for `de::value::Error`
impl ResponseError for DeError {
fn error_response(&self) -> Response {
Response::new(StatusCode::BAD_REQUEST)
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
}
}
/// `InternalServerError` for `Canceled`
impl ResponseError for Canceled {}
/// `InternalServerError` for `BlockingError`
impl<E: fmt::Debug> ResponseError for BlockingError<E> {}
/// Return `BAD_REQUEST` for `Utf8Error`
impl ResponseError for Utf8Error {
fn error_response(&self) -> Response {
Response::new(StatusCode::BAD_REQUEST)
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
}
}
@ -142,35 +210,22 @@ impl ResponseError for HttpError {}
/// Return `InternalServerError` for `io::Error`
impl ResponseError for io::Error {
fn error_response(&self) -> Response {
fn status_code(&self) -> StatusCode {
match self.kind() {
io::ErrorKind::NotFound => Response::new(StatusCode::NOT_FOUND),
io::ErrorKind::PermissionDenied => Response::new(StatusCode::FORBIDDEN),
_ => Response::new(StatusCode::INTERNAL_SERVER_ERROR),
io::ErrorKind::NotFound => StatusCode::NOT_FOUND,
io::ErrorKind::PermissionDenied => StatusCode::FORBIDDEN,
_ => StatusCode::INTERNAL_SERVER_ERROR,
}
}
}
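// Sketch of the ErrorKind mapping above; status_code() resolves through the
// ResponseError impl for io::Error.
fn io_error_status_sketch() {
    let not_found = io::Error::new(io::ErrorKind::NotFound, "missing");
    assert_eq!(not_found.status_code(), StatusCode::NOT_FOUND);

    let denied = io::Error::new(io::ErrorKind::PermissionDenied, "denied");
    assert_eq!(denied.status_code(), StatusCode::FORBIDDEN);
}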
/// `BadRequest` for `InvalidHeaderValue`
impl ResponseError for header::InvalidHeaderValue {
fn error_response(&self) -> Response {
Response::new(StatusCode::BAD_REQUEST)
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
}
}
/// `BadRequest` for `InvalidHeaderValue`
impl ResponseError for header::InvalidHeaderValueBytes {
fn error_response(&self) -> Response {
Response::new(StatusCode::BAD_REQUEST)
}
}
/// `InternalServerError` for `futures::Canceled`
impl ResponseError for Canceled {}
// /// `InternalServerError` for `actix::MailboxError`
// impl ResponseError for MailboxError {}
/// A set of errors that can occur during parsing HTTP streams
#[derive(Debug, Display)]
pub enum ParseError {
@ -210,8 +265,8 @@ pub enum ParseError {
/// Return `BadRequest` for `ParseError`
impl ResponseError for ParseError {
fn error_response(&self) -> Response {
Response::new(StatusCode::BAD_REQUEST)
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
}
}
@ -279,6 +334,8 @@ pub enum PayloadError {
Io(io::Error),
}
impl std::error::Error for PayloadError {}
impl From<h2::Error> for PayloadError {
fn from(err: h2::Error) -> Self {
PayloadError::Http2Payload(err)
@ -303,7 +360,7 @@ impl From<BlockingError<io::Error>> for PayloadError {
BlockingError::Error(e) => PayloadError::Io(e),
BlockingError::Canceled => PayloadError::Io(io::Error::new(
io::ErrorKind::Other,
"Thread pool is gone",
"Operation is canceled",
)),
}
}
@ -314,19 +371,18 @@ impl From<BlockingError<io::Error>> for PayloadError {
/// - `Overflow` returns `PayloadTooLarge`
/// - Other errors returns `BadRequest`
impl ResponseError for PayloadError {
fn error_response(&self) -> Response {
fn status_code(&self) -> StatusCode {
match *self {
PayloadError::Overflow => Response::new(StatusCode::PAYLOAD_TOO_LARGE),
_ => Response::new(StatusCode::BAD_REQUEST),
PayloadError::Overflow => StatusCode::PAYLOAD_TOO_LARGE,
_ => StatusCode::BAD_REQUEST,
}
}
}
/// Return `BadRequest` for `cookie::ParseError`
#[cfg(feature = "cookies")]
impl ResponseError for cookie::ParseError {
fn error_response(&self) -> Response {
Response::new(StatusCode::BAD_REQUEST)
impl ResponseError for crate::cookie::ParseError {
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
}
}
@ -334,7 +390,10 @@ impl ResponseError for cookie::ParseError {
/// A set of errors that can occur during dispatching http requests
pub enum DispatchError {
/// Service error
Service,
Service(Error),
/// Upgrade service error
Upgrade,
/// An `io::Error` that occurred while trying to read or write to a network
/// stream.
@ -374,7 +433,7 @@ pub enum DispatchError {
Unknown,
}
/// A set of error that can occure during parsing content type
/// A set of error that can occur during parsing content type
#[derive(PartialEq, Debug, Display)]
pub enum ContentTypeError {
/// Can not parse content type
@ -385,13 +444,23 @@ pub enum ContentTypeError {
UnknownEncoding,
}
impl std::error::Error for ContentTypeError {}
/// Return `BadRequest` for `ContentTypeError`
impl ResponseError for ContentTypeError {
fn error_response(&self) -> Response {
Response::new(StatusCode::BAD_REQUEST)
fn status_code(&self) -> StatusCode {
StatusCode::BAD_REQUEST
}
}
impl<E, U: Encoder<I> + Decoder, I> ResponseError for FramedDispatcherError<E, U, I>
where
E: fmt::Debug + fmt::Display,
<U as Encoder<I>>::Error: fmt::Debug,
<U as Decoder>::Error: fmt::Debug,
{
}
/// Helper type that can wrap any error and generate a custom response.
///
/// In the following example any `io::Error` will be converted into "BAD REQUEST"
@ -399,14 +468,12 @@ impl ResponseError for ContentTypeError {
/// default.
///
/// ```rust
/// # extern crate actix_http;
/// # use std::io;
/// # use actix_http::*;
///
/// fn index(req: Request) -> Result<&'static str> {
/// Err(error::ErrorBadRequest(io::Error::new(io::ErrorKind::Other, "error")))
/// }
/// # fn main() {}
/// ```
pub struct InternalError<T> {
cause: T,
@ -440,7 +507,7 @@ impl<T> fmt::Debug for InternalError<T>
where
T: fmt::Debug + 'static,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Debug::fmt(&self.cause, f)
}
}
@ -449,7 +516,7 @@ impl<T> fmt::Display for InternalError<T>
where
T: fmt::Display + 'static,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.cause, f)
}
}
@ -458,9 +525,31 @@ impl<T> ResponseError for InternalError<T>
where
T: fmt::Debug + fmt::Display + 'static,
{
fn status_code(&self) -> StatusCode {
match self.status {
InternalErrorType::Status(st) => st,
InternalErrorType::Response(ref resp) => {
if let Some(resp) = resp.borrow().as_ref() {
resp.head().status
} else {
StatusCode::INTERNAL_SERVER_ERROR
}
}
}
}
fn error_response(&self) -> Response {
match self.status {
InternalErrorType::Status(st) => Response::new(st),
InternalErrorType::Status(st) => {
let mut res = Response::new(st);
let mut buf = BytesMut::new();
let _ = write!(Writer(&mut buf), "{}", self);
res.headers_mut().insert(
header::CONTENT_TYPE,
header::HeaderValue::from_static("text/plain; charset=utf-8"),
);
res.set_body(Body::from(buf))
}
InternalErrorType::Response(ref resp) => {
if let Some(resp) = resp.borrow_mut().take() {
resp
@ -472,13 +561,6 @@ where
}
}
/// Convert Response to a Error
impl From<Response> for Error {
fn from(res: Response) -> Error {
InternalError::from_response("", res).into()
}
}
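// Sketch of the two construction paths handled above (the values are arbitrary):
// a wrapped status serves the Display text as a text/plain body, while a
// prepared Response is returned as-is with its own status.
fn internal_error_sketch() {
    let timeout = InternalError::new("db timeout", StatusCode::SERVICE_UNAVAILABLE);
    assert_eq!(timeout.status_code(), StatusCode::SERVICE_UNAVAILABLE);

    let prepared = InternalError::from_response("oops", Response::Ok().finish());
    assert_eq!(prepared.status_code(), StatusCode::OK);
}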
/// Helper function that creates wrapper of any error and generate *BAD
/// REQUEST* response.
#[allow(non_snake_case)]
@ -869,24 +951,20 @@ where
InternalError::new(err, StatusCode::NETWORK_AUTHENTICATION_REQUIRED).into()
}
#[cfg(feature = "fail")]
mod failure_integration {
use super::*;
#[cfg(feature = "actors")]
/// `InternalServerError` for `actix::MailboxError`
/// This is supported on feature=`actors` only
impl ResponseError for actix::MailboxError {}
/// Compatibility for `failure::Error`
impl ResponseError for failure::Error {
fn error_response(&self) -> Response {
Response::new(StatusCode::INTERNAL_SERVER_ERROR)
}
}
}
#[cfg(feature = "actors")]
/// `InternalServerError` for `actix::ResolverError`
/// This is supported on feature=`actors` only
impl ResponseError for actix::actors::resolver::ResolverError {}
#[cfg(test)]
mod tests {
use super::*;
use http::{Error as HttpError, StatusCode};
use httparse;
use std::error::Error as StdError;
use std::io;
#[test]
@ -899,10 +977,8 @@ mod tests {
assert_eq!(resp.status(), StatusCode::INTERNAL_SERVER_ERROR);
}
#[cfg(feature = "cookies")]
#[test]
fn test_cookie_parse() {
use cookie::ParseError as CookieParseError;
let resp: Response = CookieParseError::EmptyName.error_response();
assert_eq!(resp.status(), StatusCode::BAD_REQUEST);
}
@ -917,7 +993,7 @@ mod tests {
#[test]
fn test_error_cause() {
let orig = io::Error::new(io::ErrorKind::Other, "other");
let desc = orig.description().to_owned();
let desc = orig.to_string();
let e = Error::from(orig);
assert_eq!(format!("{}", e.as_response_error()), desc);
}
@ -925,7 +1001,7 @@ mod tests {
#[test]
fn test_error_display() {
let orig = io::Error::new(io::ErrorKind::Other, "other");
let desc = orig.description().to_owned();
let desc = orig.to_string();
let e = Error::from(orig);
assert_eq!(format!("{}", e), desc);
}
@ -967,7 +1043,7 @@ mod tests {
match ParseError::from($from) {
e @ $error => {
let desc = format!("{}", e);
assert_eq!(desc, format!("IO error: {}", $from.description()));
assert_eq!(desc, format!("IO error: {}", $from));
}
_ => unreachable!("{:?}", $from),
}
@ -995,6 +1071,16 @@ mod tests {
assert_eq!(resp.status(), StatusCode::OK);
}
#[test]
fn test_error_casting() {
let err = PayloadError::Overflow;
let resp_err: &dyn ResponseError = &err;
let err = resp_err.downcast_ref::<PayloadError>().unwrap();
assert_eq!(err.to_string(), "A payload reached size limit.");
let not_err = resp_err.downcast_ref::<ContentTypeError>();
assert!(not_err.is_none());
}
#[test]
fn test_error_helpers() {
let r: Response = ErrorBadRequest("err").into();


@ -1,12 +1,14 @@
use std::any::{Any, TypeId};
use std::fmt;
use hashbrown::HashMap;
use fxhash::FxHashMap;
#[derive(Default)]
/// A type map of request extensions.
pub struct Extensions {
map: HashMap<TypeId, Box<Any>>,
/// Use FxHasher with a std HashMap for faster
/// lookups on the small `TypeId` (u64 equivalent) keys.
map: FxHashMap<TypeId, Box<dyn Any>>,
}
impl Extensions {
@ -14,7 +16,7 @@ impl Extensions {
#[inline]
pub fn new() -> Extensions {
Extensions {
map: HashMap::default(),
map: FxHashMap::default(),
}
}
@ -28,33 +30,30 @@ impl Extensions {
/// Check if container contains entry
pub fn contains<T: 'static>(&self) -> bool {
self.map.get(&TypeId::of::<T>()).is_some()
self.map.contains_key(&TypeId::of::<T>())
}
/// Get a reference to a type previously inserted on this `Extensions`.
pub fn get<T: 'static>(&self) -> Option<&T> {
self.map
.get(&TypeId::of::<T>())
.and_then(|boxed| (&**boxed as &(Any + 'static)).downcast_ref())
.and_then(|boxed| boxed.downcast_ref())
}
/// Get a mutable reference to a type previously inserted on this `Extensions`.
pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
self.map
.get_mut(&TypeId::of::<T>())
.and_then(|boxed| (&mut **boxed as &mut (Any + 'static)).downcast_mut())
.and_then(|boxed| boxed.downcast_mut())
}
/// Remove a type from this `Extensions`.
///
/// If a extension of this type existed, it will be returned.
pub fn remove<T: 'static>(&mut self) -> Option<T> {
self.map.remove(&TypeId::of::<T>()).and_then(|boxed| {
(boxed as Box<Any + 'static>)
.downcast()
.ok()
.map(|boxed| *boxed)
})
self.map
.remove(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast().ok().map(|boxed| *boxed))
}
/// Clear the `Extensions` of all inserted extensions.
@ -65,27 +64,118 @@ impl Extensions {
}
impl fmt::Debug for Extensions {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Extensions").finish()
}
}
#[test]
fn test_extensions() {
#[derive(Debug, PartialEq)]
struct MyType(i32);
#[cfg(test)]
mod tests {
use super::*;
let mut extensions = Extensions::new();
#[test]
fn test_remove() {
let mut map = Extensions::new();
extensions.insert(5i32);
extensions.insert(MyType(10));
map.insert::<i8>(123);
assert!(map.get::<i8>().is_some());
assert_eq!(extensions.get(), Some(&5i32));
assert_eq!(extensions.get_mut(), Some(&mut 5i32));
map.remove::<i8>();
assert!(map.get::<i8>().is_none());
}
assert_eq!(extensions.remove::<i32>(), Some(5i32));
assert!(extensions.get::<i32>().is_none());
#[test]
fn test_clear() {
let mut map = Extensions::new();
assert_eq!(extensions.get::<bool>(), None);
assert_eq!(extensions.get(), Some(&MyType(10)));
map.insert::<i8>(8);
map.insert::<i16>(16);
map.insert::<i32>(32);
assert!(map.contains::<i8>());
assert!(map.contains::<i16>());
assert!(map.contains::<i32>());
map.clear();
assert!(!map.contains::<i8>());
assert!(!map.contains::<i16>());
assert!(!map.contains::<i32>());
map.insert::<i8>(10);
assert_eq!(*map.get::<i8>().unwrap(), 10);
}
#[test]
fn test_integers() {
let mut map = Extensions::new();
map.insert::<i8>(8);
map.insert::<i16>(16);
map.insert::<i32>(32);
map.insert::<i64>(64);
map.insert::<i128>(128);
map.insert::<u8>(8);
map.insert::<u16>(16);
map.insert::<u32>(32);
map.insert::<u64>(64);
map.insert::<u128>(128);
assert!(map.get::<i8>().is_some());
assert!(map.get::<i16>().is_some());
assert!(map.get::<i32>().is_some());
assert!(map.get::<i64>().is_some());
assert!(map.get::<i128>().is_some());
assert!(map.get::<u8>().is_some());
assert!(map.get::<u16>().is_some());
assert!(map.get::<u32>().is_some());
assert!(map.get::<u64>().is_some());
assert!(map.get::<u128>().is_some());
}
#[test]
fn test_composition() {
struct Magi<T>(pub T);
struct Madoka {
pub god: bool,
}
struct Homura {
pub attempts: usize,
}
struct Mami {
pub guns: usize,
}
let mut map = Extensions::new();
map.insert(Magi(Madoka { god: false }));
map.insert(Magi(Homura { attempts: 0 }));
map.insert(Magi(Mami { guns: 999 }));
assert!(!map.get::<Magi<Madoka>>().unwrap().0.god);
assert_eq!(0, map.get::<Magi<Homura>>().unwrap().0.attempts);
assert_eq!(999, map.get::<Magi<Mami>>().unwrap().0.guns);
}
#[test]
fn test_extensions() {
#[derive(Debug, PartialEq)]
struct MyType(i32);
let mut extensions = Extensions::new();
extensions.insert(5i32);
extensions.insert(MyType(10));
assert_eq!(extensions.get(), Some(&5i32));
assert_eq!(extensions.get_mut(), Some(&mut 5i32));
assert_eq!(extensions.remove::<i32>(), Some(5i32));
assert!(extensions.get::<i32>().is_none());
assert_eq!(extensions.get::<bool>(), None);
assert_eq!(extensions.get(), Some(&MyType(10)));
}
}
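// A small sketch of the newtype pattern this map encourages: entries are keyed
// by TypeId, so two callers inserting a bare u64 would overwrite each other,
// while a wrapper type per owner keeps the entries distinct. RequestId is a
// hypothetical example type, not part of this crate.
#[derive(Debug, PartialEq)]
struct RequestId(u64);

fn newtype_keys_sketch() {
    let mut ext = Extensions::new();
    ext.insert(7u64);
    ext.insert(RequestId(7));

    assert_eq!(ext.get::<u64>(), Some(&7));
    assert_eq!(ext.get::<RequestId>(), Some(&RequestId(7)));
}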


@ -1,22 +1,17 @@
#![allow(unused_imports, unused_variables, dead_code)]
use std::io::{self, Write};
use std::io;
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags;
use bytes::{BufMut, Bytes, BytesMut};
use http::header::{
HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING, UPGRADE,
};
use bytes::{Bytes, BytesMut};
use http::{Method, Version};
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
use super::{decoder, encoder};
use super::{decoder, encoder, reserve_readbuf};
use super::{Message, MessageType};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::error::{ParseError, PayloadError};
use crate::helpers;
use crate::message::{ConnectionType, Head, MessagePool, RequestHead, ResponseHead};
use crate::message::{ConnectionType, RequestHeadType, ResponseHead};
bitflags! {
struct Flags: u8 {
@ -26,8 +21,6 @@ bitflags! {
}
}
const AVERAGE_HEADER_SIZE: usize = 30;
/// HTTP/1 Codec
pub struct ClientCodec {
inner: ClientCodecInner,
@ -47,8 +40,7 @@ struct ClientCodecInner {
// encoder part
flags: Flags,
headers_size: u32,
encoder: encoder::MessageEncoder<RequestHead>,
encoder: encoder::MessageEncoder<RequestHeadType>,
}
impl Default for ClientCodec {
@ -76,7 +68,6 @@ impl ClientCodec {
ctype: ConnectionType::Close,
flags,
headers_size: 0,
encoder: encoder::MessageEncoder::default(),
},
}
@ -150,6 +141,7 @@ impl Decoder for ClientCodec {
} else {
self.inner.payload = None;
}
reserve_readbuf(src);
Ok(Some(req))
} else {
Ok(None)
@ -168,7 +160,10 @@ impl Decoder for ClientPayloadCodec {
);
Ok(match self.inner.payload.as_mut().unwrap().decode(src)? {
Some(PayloadItem::Chunk(chunk)) => Some(Some(chunk)),
Some(PayloadItem::Chunk(chunk)) => {
reserve_readbuf(src);
Some(Some(chunk))
}
Some(PayloadItem::Eof) => {
self.inner.payload.take();
Some(None)
@ -178,23 +173,24 @@ impl Decoder for ClientPayloadCodec {
}
}
impl Encoder for ClientCodec {
type Item = Message<(RequestHead, BodySize)>;
impl Encoder<Message<(RequestHeadType, BodySize)>> for ClientCodec {
type Error = io::Error;
fn encode(
&mut self,
item: Self::Item,
item: Message<(RequestHeadType, BodySize)>,
dst: &mut BytesMut,
) -> Result<(), Self::Error> {
match item {
Message::Item((mut msg, length)) => {
Message::Item((mut head, length)) => {
let inner = &mut self.inner;
inner.version = msg.version;
inner.flags.set(Flags::HEAD, msg.method == Method::HEAD);
inner.version = head.as_ref().version;
inner
.flags
.set(Flags::HEAD, head.as_ref().method == Method::HEAD);
// connection status
inner.ctype = match msg.connection_type() {
inner.ctype = match head.as_ref().connection_type() {
ConnectionType::KeepAlive => {
if inner.flags.contains(Flags::KEEPALIVE_ENABLED) {
ConnectionType::KeepAlive
@ -208,7 +204,7 @@ impl Encoder for ClientCodec {
inner.encoder.encode(
dst,
&mut msg,
&mut head,
false,
false,
inner.version,


@ -1,12 +1,9 @@
#![allow(unused_imports, unused_variables, dead_code)]
use std::fmt;
use std::io::{self, Write};
use std::{fmt, io};
use actix_codec::{Decoder, Encoder};
use bitflags::bitflags;
use bytes::{BufMut, Bytes, BytesMut};
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
use http::{Method, StatusCode, Version};
use bytes::BytesMut;
use http::{Method, Version};
use super::decoder::{PayloadDecoder, PayloadItem, PayloadType};
use super::{decoder, encoder};
@ -14,21 +11,18 @@ use super::{Message, MessageType};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::error::ParseError;
use crate::helpers;
use crate::message::{ConnectionType, Head, ResponseHead};
use crate::message::ConnectionType;
use crate::request::Request;
use crate::response::Response;
bitflags! {
struct Flags: u8 {
const HEAD = 0b0000_0001;
const KEEPALIVE_ENABLED = 0b0000_1000;
const STREAM = 0b0001_0000;
const KEEPALIVE_ENABLED = 0b0000_0010;
const STREAM = 0b0000_0100;
}
}
const AVERAGE_HEADER_SIZE: usize = 30;
/// HTTP/1 Codec
pub struct Codec {
config: ServiceConfig,
@ -39,7 +33,6 @@ pub struct Codec {
// encoder part
flags: Flags,
headers_size: u32,
encoder: encoder::MessageEncoder<Response<()>>,
}
@ -50,7 +43,7 @@ impl Default for Codec {
}
impl fmt::Debug for Codec {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "h1::Codec({:?})", self.flags)
}
}
@ -67,27 +60,34 @@ impl Codec {
};
Codec {
config,
flags,
decoder: decoder::MessageDecoder::default(),
payload: None,
version: Version::HTTP_11,
ctype: ConnectionType::Close,
flags,
headers_size: 0,
encoder: encoder::MessageEncoder::default(),
}
}
#[inline]
/// Check if request is upgrade
pub fn upgrade(&self) -> bool {
self.ctype == ConnectionType::Upgrade
}
#[inline]
/// Check if last response is keep-alive
pub fn keepalive(&self) -> bool {
self.ctype == ConnectionType::KeepAlive
}
#[inline]
/// Check if keep-alive enabled on server level
pub fn keepalive_enabled(&self) -> bool {
self.flags.contains(Flags::KEEPALIVE_ENABLED)
}
#[inline]
/// Check last request's message type
pub fn message_type(&self) -> MessageType {
if self.flags.contains(Flags::STREAM) {
@ -98,6 +98,11 @@ impl Codec {
MessageType::Payload
}
}
#[inline]
pub fn config(&self) -> &ServiceConfig {
&self.config
}
}
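// Minimal sketch: the codec is cheap to construct via Default, and the
// accessors above are what the dispatcher consults between decoded messages.
fn codec_state_sketch() {
    let codec = Codec::default();
    println!(
        "{:?} upgrade={} keepalive={} keepalive_enabled={}",
        codec,
        codec.upgrade(),
        codec.keepalive(),
        codec.keepalive_enabled()
    );
}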
impl Decoder for Codec {
@ -139,13 +144,12 @@ impl Decoder for Codec {
}
}
impl Encoder for Codec {
type Item = Message<(Response<()>, BodySize)>;
impl Encoder<Message<(Response<()>, BodySize)>> for Codec {
type Error = io::Error;
fn encode(
&mut self,
item: Self::Item,
item: Message<(Response<()>, BodySize)>,
dst: &mut BytesMut,
) -> Result<(), Self::Error> {
match item {
@ -165,7 +169,6 @@ impl Encoder for Codec {
};
// encode message
let len = dst.len();
self.encoder.encode(
dst,
&mut res,
@ -176,7 +179,7 @@ impl Encoder for Codec {
self.ctype,
&self.config,
)?;
self.headers_size = (dst.len() - len) as u32;
// self.headers_size = (dst.len() - len) as u32;
}
Message::Chunk(Some(bytes)) => {
self.encoder.encode_chunk(bytes.as_ref(), dst)?;
@ -191,17 +194,11 @@ impl Encoder for Codec {
#[cfg(test)]
mod tests {
use std::{cmp, io};
use actix_codec::{AsyncRead, AsyncWrite};
use bytes::{Buf, Bytes, BytesMut};
use http::{Method, Version};
use bytes::BytesMut;
use http::Method;
use super::*;
use crate::error::ParseError;
use crate::h1::Message;
use crate::httpmessage::HttpMessage;
use crate::request::Request;
#[test]
fn test_http_request_chunked_payload_and_next_message() {


@ -1,22 +1,23 @@
use std::convert::TryFrom;
use std::io;
use std::marker::PhantomData;
use std::{io, mem};
use std::task::Poll;
use actix_codec::Decoder;
use bytes::{Bytes, BytesMut};
use futures::{Async, Poll};
use bytes::{Buf, Bytes, BytesMut};
use http::header::{HeaderName, HeaderValue};
use http::{header, HeaderMap, HttpTryFrom, Method, StatusCode, Uri, Version};
use httparse;
use http::{header, Method, StatusCode, Uri, Version};
use log::{debug, error, trace};
use crate::error::ParseError;
use crate::header::HeaderMap;
use crate::message::{ConnectionType, ResponseHead};
use crate::request::Request;
const MAX_BUFFER_SIZE: usize = 131_072;
const MAX_HEADERS: usize = 96;
/// Incoming messagd decoder
/// Incoming message decoder
pub(crate) struct MessageDecoder<T: MessageType>(PhantomData<T>);
#[derive(Debug)]
@ -44,13 +45,15 @@ impl<T: MessageType> Decoder for MessageDecoder<T> {
pub(crate) enum PayloadLength {
Payload(PayloadType),
Upgrade,
UpgradeWebSocket,
None,
}
pub(crate) trait MessageType: Sized {
fn set_connection_type(&mut self, ctype: Option<ConnectionType>);
fn set_expect(&mut self);
fn headers_mut(&mut self) -> &mut HeaderMap;
fn decode(src: &mut BytesMut) -> Result<Option<(Self, PayloadType)>, ParseError>;
@ -61,7 +64,8 @@ pub(crate) trait MessageType: Sized {
raw_headers: &[HeaderIndex],
) -> Result<PayloadLength, ParseError> {
let mut ka = None;
let mut has_upgrade = false;
let mut has_upgrade_websocket = false;
let mut expect = false;
let mut chunked = false;
let mut content_length = None;
@ -69,73 +73,80 @@ pub(crate) trait MessageType: Sized {
let headers = self.headers_mut();
for idx in raw_headers.iter() {
if let Ok(name) = HeaderName::from_bytes(&slice[idx.name.0..idx.name.1])
{
// Unsafe: httparse check header value for valid utf-8
let value = unsafe {
HeaderValue::from_shared_unchecked(
slice.slice(idx.value.0, idx.value.1),
)
};
match name {
header::CONTENT_LENGTH => {
if let Ok(s) = value.to_str() {
if let Ok(len) = s.parse::<u64>() {
let name =
HeaderName::from_bytes(&slice[idx.name.0..idx.name.1]).unwrap();
// SAFETY: httparse already checks header value is only visible ASCII bytes
// from_maybe_shared_unchecked contains debug assertions so they are omitted here
let value = unsafe {
HeaderValue::from_maybe_shared_unchecked(
slice.slice(idx.value.0..idx.value.1),
)
};
match name {
header::CONTENT_LENGTH => {
if let Ok(s) = value.to_str() {
if let Ok(len) = s.parse::<u64>() {
if len != 0 {
content_length = Some(len);
} else {
debug!("illegal Content-Length: {:?}", s);
return Err(ParseError::Header);
}
} else {
debug!("illegal Content-Length: {:?}", value);
debug!("illegal Content-Length: {:?}", s);
return Err(ParseError::Header);
}
} else {
debug!("illegal Content-Length: {:?}", value);
return Err(ParseError::Header);
}
// transfer-encoding
header::TRANSFER_ENCODING => {
if let Ok(s) = value.to_str().map(|s| s.trim()) {
chunked = s.eq_ignore_ascii_case("chunked");
} else {
return Err(ParseError::Header);
}
}
// transfer-encoding
header::TRANSFER_ENCODING => {
if let Ok(s) = value.to_str().map(|s| s.trim()) {
chunked = s.eq_ignore_ascii_case("chunked");
} else {
return Err(ParseError::Header);
}
// connection keep-alive state
header::CONNECTION => {
ka = if let Ok(conn) = value.to_str().map(|conn| conn.trim())
{
if conn.eq_ignore_ascii_case("keep-alive") {
Some(ConnectionType::KeepAlive)
} else if conn.eq_ignore_ascii_case("close") {
Some(ConnectionType::Close)
} else if conn.eq_ignore_ascii_case("upgrade") {
Some(ConnectionType::Upgrade)
} else {
None
}
}
// connection keep-alive state
header::CONNECTION => {
ka = if let Ok(conn) = value.to_str().map(|conn| conn.trim()) {
if conn.eq_ignore_ascii_case("keep-alive") {
Some(ConnectionType::KeepAlive)
} else if conn.eq_ignore_ascii_case("close") {
Some(ConnectionType::Close)
} else if conn.eq_ignore_ascii_case("upgrade") {
Some(ConnectionType::Upgrade)
} else {
None
};
}
header::UPGRADE => {
has_upgrade = true;
// check content-length, some clients (dart)
// sends "content-length: 0" with websocket upgrade
if let Ok(val) = value.to_str().map(|val| val.trim()) {
if val.eq_ignore_ascii_case("websocket") {
content_length = None;
}
}
} else {
None
};
}
header::UPGRADE => {
if let Ok(val) = value.to_str().map(|val| val.trim()) {
if val.eq_ignore_ascii_case("websocket") {
has_upgrade_websocket = true;
}
}
_ => (),
}
headers.append(name, value);
} else {
return Err(ParseError::Header);
header::EXPECT => {
let bytes = value.as_bytes();
if bytes.len() >= 4 && &bytes[0..4] == b"100-" {
expect = true;
}
}
_ => (),
}
headers.append(name, value);
}
}
self.set_connection_type(ka);
if expect {
self.set_expect()
}
// https://tools.ietf.org/html/rfc7230#section-3.3.3
if chunked {
@ -143,13 +154,13 @@ pub(crate) trait MessageType: Sized {
Ok(PayloadLength::Payload(PayloadType::Payload(
PayloadDecoder::chunked(),
)))
} else if has_upgrade_websocket {
Ok(PayloadLength::UpgradeWebSocket)
} else if let Some(len) = content_length {
// Content-Length
Ok(PayloadLength::Payload(PayloadType::Payload(
PayloadDecoder::length(len),
)))
} else if has_upgrade {
Ok(PayloadLength::Upgrade)
} else {
Ok(PayloadLength::None)
}
@ -163,18 +174,19 @@ impl MessageType for Request {
}
}
fn set_expect(&mut self) {
self.head_mut().set_expect();
}
fn headers_mut(&mut self) -> &mut HeaderMap {
&mut self.head_mut().headers
}
fn decode(src: &mut BytesMut) -> Result<Option<(Self, PayloadType)>, ParseError> {
// Unsafe: we read only this data only after httparse parses headers into.
// performance bump for pipeline benchmarks.
let mut headers: [HeaderIndex; MAX_HEADERS] = unsafe { mem::uninitialized() };
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
let (len, method, uri, ver, h_len) = {
let mut parsed: [httparse::Header; MAX_HEADERS] =
unsafe { mem::uninitialized() };
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
let mut req = httparse::Request::new(&mut parsed);
match req.parse(src)? {
@ -198,12 +210,12 @@ impl MessageType for Request {
let mut msg = Request::new();
// convert headers
let len = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len])?;
let length = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len])?;
// payload decoder
let decoder = match len {
let decoder = match length {
PayloadLength::Payload(pl) => pl,
PayloadLength::Upgrade => {
PayloadLength::UpgradeWebSocket => {
// upgrade(websocket)
PayloadType::Stream(PayloadDecoder::eof())
}
@ -235,18 +247,17 @@ impl MessageType for ResponseHead {
}
}
fn set_expect(&mut self) {}
fn headers_mut(&mut self) -> &mut HeaderMap {
&mut self.headers
}
fn decode(src: &mut BytesMut) -> Result<Option<(Self, PayloadType)>, ParseError> {
// Unsafe: we read only this data only after httparse parses headers into.
// performance bump for pipeline benchmarks.
let mut headers: [HeaderIndex; MAX_HEADERS] = unsafe { mem::uninitialized() };
let mut headers: [HeaderIndex; MAX_HEADERS] = EMPTY_HEADER_INDEX_ARRAY;
let (len, ver, status, h_len) = {
let mut parsed: [httparse::Header; MAX_HEADERS] =
unsafe { mem::uninitialized() };
let mut parsed: [httparse::Header<'_>; MAX_HEADERS] = EMPTY_HEADER_ARRAY;
let mut res = httparse::Response::new(&mut parsed);
match res.parse(src)? {
@ -266,13 +277,14 @@ impl MessageType for ResponseHead {
}
};
let mut msg = ResponseHead::default();
let mut msg = ResponseHead::new(status);
msg.version = ver;
// convert headers
let len = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len])?;
let length = msg.set_headers(&src.split_to(len).freeze(), &headers[..h_len])?;
// message payload
let decoder = if let PayloadLength::Payload(pl) = len {
let decoder = if let PayloadLength::Payload(pl) = length {
pl
} else if status == StatusCode::SWITCHING_PROTOCOLS {
// switching protocol or connect
@ -281,12 +293,15 @@ impl MessageType for ResponseHead {
error!("MAX_BUFFER_SIZE unprocessed data reached, closing");
return Err(ParseError::TooLarge);
} else {
PayloadType::None
// for HTTP/1.0 read to eof and close connection
if msg.version == Version::HTTP_10 {
msg.set_connection_type(ConnectionType::Close);
PayloadType::Payload(PayloadDecoder::eof())
} else {
PayloadType::None
}
};
msg.status = status;
msg.version = ver;
Ok(Some((msg, decoder)))
}
}
@ -297,10 +312,21 @@ pub(crate) struct HeaderIndex {
pub(crate) value: (usize, usize),
}
pub(crate) const EMPTY_HEADER_INDEX: HeaderIndex = HeaderIndex {
name: (0, 0),
value: (0, 0),
};
pub(crate) const EMPTY_HEADER_INDEX_ARRAY: [HeaderIndex; MAX_HEADERS] =
[EMPTY_HEADER_INDEX; MAX_HEADERS];
pub(crate) const EMPTY_HEADER_ARRAY: [httparse::Header<'static>; MAX_HEADERS] =
[httparse::EMPTY_HEADER; MAX_HEADERS];
impl HeaderIndex {
pub(crate) fn record(
bytes: &[u8],
headers: &[httparse::Header],
headers: &[httparse::Header<'_>],
indices: &mut [HeaderIndex],
) {
let bytes_ptr = bytes.as_ptr() as usize;
@ -315,7 +341,7 @@ impl HeaderIndex {
}
}
#[derive(Debug, Clone)]
#[derive(Debug, Clone, PartialEq)]
/// Http payload item
pub enum PayloadItem {
Chunk(Bytes),
@ -403,7 +429,7 @@ impl Decoder for PayloadDecoder {
let len = src.len() as u64;
let buf;
if *remaining > len {
buf = src.take().freeze();
buf = src.split().freeze();
*remaining -= len;
} else {
buf = src.split_to(*remaining as usize).freeze();
@ -417,9 +443,10 @@ impl Decoder for PayloadDecoder {
loop {
let mut buf = None;
// advances the chunked state
*state = match state.step(src, size, &mut buf)? {
Async::NotReady => return Ok(None),
Async::Ready(state) => state,
*state = match state.step(src, size, &mut buf) {
Poll::Pending => return Ok(None),
Poll::Ready(Ok(state)) => state,
Poll::Ready(Err(e)) => return Err(e),
};
if *state == ChunkedState::End {
trace!("End of chunked stream");
@ -437,7 +464,7 @@ impl Decoder for PayloadDecoder {
if src.is_empty() {
Ok(None)
} else {
Ok(Some(PayloadItem::Chunk(src.take().freeze())))
Ok(Some(PayloadItem::Chunk(src.split().freeze())))
}
}
}
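// Sketch of decoding a single chunked body with the state machine below; the
// wire bytes mirror the "4\r\ntest\r\n0\r\n\r\n" frame used by the encoder tests.
fn chunked_decode_sketch() {
    let mut buf = BytesMut::from(&"4\r\ntest\r\n0\r\n\r\n"[..]);
    let mut pl = PayloadDecoder::chunked();

    let chunk = pl.decode(&mut buf).unwrap().unwrap();
    assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"test")));

    let eof = pl.decode(&mut buf).unwrap().unwrap();
    assert_eq!(eof, PayloadItem::Eof);
}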
@ -448,10 +475,10 @@ macro_rules! byte (
($rdr:ident) => ({
if $rdr.len() > 0 {
let b = $rdr[0];
$rdr.split_to(1);
$rdr.advance(1);
b
} else {
return Ok(Async::NotReady)
return Poll::Pending
}
})
);
@ -462,7 +489,7 @@ impl ChunkedState {
body: &mut BytesMut,
size: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<ChunkedState, io::Error> {
) -> Poll<Result<ChunkedState, io::Error>> {
use self::ChunkedState::*;
match *self {
Size => ChunkedState::read_size(body, size),
@ -474,66 +501,71 @@ impl ChunkedState {
BodyLf => ChunkedState::read_body_lf(body),
EndCr => ChunkedState::read_end_cr(body),
EndLf => ChunkedState::read_end_lf(body),
End => Ok(Async::Ready(ChunkedState::End)),
End => Poll::Ready(Ok(ChunkedState::End)),
}
}
fn read_size(rdr: &mut BytesMut, size: &mut u64) -> Poll<ChunkedState, io::Error> {
fn read_size(
rdr: &mut BytesMut,
size: &mut u64,
) -> Poll<Result<ChunkedState, io::Error>> {
let radix = 16;
match byte!(rdr) {
b @ b'0'...b'9' => {
b @ b'0'..=b'9' => {
*size *= radix;
*size += u64::from(b - b'0');
}
b @ b'a'...b'f' => {
b @ b'a'..=b'f' => {
*size *= radix;
*size += u64::from(b + 10 - b'a');
}
b @ b'A'...b'F' => {
b @ b'A'..=b'F' => {
*size *= radix;
*size += u64::from(b + 10 - b'A');
}
b'\t' | b' ' => return Ok(Async::Ready(ChunkedState::SizeLws)),
b';' => return Ok(Async::Ready(ChunkedState::Extension)),
b'\r' => return Ok(Async::Ready(ChunkedState::SizeLf)),
b'\t' | b' ' => return Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => return Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => return Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => {
return Err(io::Error::new(
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size line: Invalid Size",
));
)));
}
}
Ok(Async::Ready(ChunkedState::Size))
Poll::Ready(Ok(ChunkedState::Size))
}
fn read_size_lws(rdr: &mut BytesMut) -> Poll<ChunkedState, io::Error> {
fn read_size_lws(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
trace!("read_size_lws");
match byte!(rdr) {
// LWS can follow the chunk size, but no more digits can come
b'\t' | b' ' => Ok(Async::Ready(ChunkedState::SizeLws)),
b';' => Ok(Async::Ready(ChunkedState::Extension)),
b'\r' => Ok(Async::Ready(ChunkedState::SizeLf)),
_ => Err(io::Error::new(
b'\t' | b' ' => Poll::Ready(Ok(ChunkedState::SizeLws)),
b';' => Poll::Ready(Ok(ChunkedState::Extension)),
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size linear white space",
)),
))),
}
}
fn read_extension(rdr: &mut BytesMut) -> Poll<ChunkedState, io::Error> {
fn read_extension(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Ok(Async::Ready(ChunkedState::SizeLf)),
_ => Ok(Async::Ready(ChunkedState::Extension)), // no supported extensions
b'\r' => Poll::Ready(Ok(ChunkedState::SizeLf)),
_ => Poll::Ready(Ok(ChunkedState::Extension)), // no supported extensions
}
}
fn read_size_lf(
rdr: &mut BytesMut,
size: &mut u64,
) -> Poll<ChunkedState, io::Error> {
) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' if *size > 0 => Ok(Async::Ready(ChunkedState::Body)),
b'\n' if *size == 0 => Ok(Async::Ready(ChunkedState::EndCr)),
_ => Err(io::Error::new(
b'\n' if *size > 0 => Poll::Ready(Ok(ChunkedState::Body)),
b'\n' if *size == 0 => Poll::Ready(Ok(ChunkedState::EndCr)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk size LF",
)),
))),
}
}
@ -541,16 +573,16 @@ impl ChunkedState {
rdr: &mut BytesMut,
rem: &mut u64,
buf: &mut Option<Bytes>,
) -> Poll<ChunkedState, io::Error> {
) -> Poll<Result<ChunkedState, io::Error>> {
trace!("Chunked read, remaining={:?}", rem);
let len = rdr.len() as u64;
if len == 0 {
Ok(Async::Ready(ChunkedState::Body))
Poll::Ready(Ok(ChunkedState::Body))
} else {
let slice;
if *rem > len {
slice = rdr.take().freeze();
slice = rdr.split().freeze();
*rem -= len;
} else {
slice = rdr.split_to(*rem as usize).freeze();
@ -558,47 +590,47 @@ impl ChunkedState {
}
*buf = Some(slice);
if *rem > 0 {
Ok(Async::Ready(ChunkedState::Body))
Poll::Ready(Ok(ChunkedState::Body))
} else {
Ok(Async::Ready(ChunkedState::BodyCr))
Poll::Ready(Ok(ChunkedState::BodyCr))
}
}
}
fn read_body_cr(rdr: &mut BytesMut) -> Poll<ChunkedState, io::Error> {
fn read_body_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Ok(Async::Ready(ChunkedState::BodyLf)),
_ => Err(io::Error::new(
b'\r' => Poll::Ready(Ok(ChunkedState::BodyLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body CR",
)),
))),
}
}
fn read_body_lf(rdr: &mut BytesMut) -> Poll<ChunkedState, io::Error> {
fn read_body_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Ok(Async::Ready(ChunkedState::Size)),
_ => Err(io::Error::new(
b'\n' => Poll::Ready(Ok(ChunkedState::Size)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk body LF",
)),
))),
}
}
fn read_end_cr(rdr: &mut BytesMut) -> Poll<ChunkedState, io::Error> {
fn read_end_cr(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\r' => Ok(Async::Ready(ChunkedState::EndLf)),
_ => Err(io::Error::new(
b'\r' => Poll::Ready(Ok(ChunkedState::EndLf)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end CR",
)),
))),
}
}
fn read_end_lf(rdr: &mut BytesMut) -> Poll<ChunkedState, io::Error> {
fn read_end_lf(rdr: &mut BytesMut) -> Poll<Result<ChunkedState, io::Error>> {
match byte!(rdr) {
b'\n' => Ok(Async::Ready(ChunkedState::End)),
_ => Err(io::Error::new(
b'\n' => Poll::Ready(Ok(ChunkedState::End)),
_ => Poll::Ready(Err(io::Error::new(
io::ErrorKind::InvalidInput,
"Invalid chunk end LF",
)),
))),
}
}
}
@ -610,6 +642,7 @@ mod tests {
use super::*;
use crate::error::ParseError;
use crate::http::header::{HeaderName, SET_COOKIE};
use crate::httpmessage::HttpMessage;
impl PayloadType {
@ -621,10 +654,7 @@ mod tests {
}
fn is_unhandled(&self) -> bool {
match self {
PayloadType::Stream(_) => true,
_ => false,
}
matches!(self, PayloadType::Stream(_))
}
}
@ -636,10 +666,7 @@ mod tests {
}
}
fn eof(&self) -> bool {
match *self {
PayloadItem::Eof => true,
_ => false,
}
matches!(*self, PayloadItem::Eof)
}
}
@ -770,7 +797,13 @@ mod tests {
assert_eq!(req.version(), Version::HTTP_11);
assert_eq!(*req.method(), Method::GET);
assert_eq!(req.path(), "/test");
assert_eq!(req.headers().get("test").unwrap().as_bytes(), b"value");
assert_eq!(
req.headers()
.get(HeaderName::try_from("test").unwrap())
.unwrap()
.as_bytes(),
b"value"
);
}
#[test]
@ -785,12 +818,11 @@ mod tests {
let val: Vec<_> = req
.headers()
.get_all("Set-Cookie")
.iter()
.get_all(SET_COOKIE)
.map(|v| v.to_str().unwrap().to_owned())
.collect();
assert_eq!(val[0], "c1=cookie1");
assert_eq!(val[1], "c2=cookie2");
assert_eq!(val[1], "c1=cookie1");
assert_eq!(val[0], "c2=cookie2");
}
#[test]
@ -940,7 +972,7 @@ mod tests {
unreachable!("Error");
}
// type in chunked
// intentional typo in "chunked"
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
transfer-encoding: chnked\r\n\r\n",
@ -1001,7 +1033,7 @@ mod tests {
}
#[test]
fn test_http_request_upgrade() {
fn test_http_request_upgrade_websocket() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
connection: upgrade\r\n\
@ -1015,6 +1047,26 @@ mod tests {
assert!(pl.is_unhandled());
}
#[test]
fn test_http_request_upgrade_h2c() {
let mut buf = BytesMut::from(
"GET /test HTTP/1.1\r\n\
connection: upgrade, http2-settings\r\n\
upgrade: h2c\r\n\
http2-settings: dummy\r\n\r\n",
);
let mut reader = MessageDecoder::<Request>::default();
let (req, pl) = reader.decode(&mut buf).unwrap().unwrap();
// `connection: upgrade, http2-settings` doesn't work properly;
// see MessageType::set_headers().
//
// The line below should be:
// assert_eq!(req.head().connection_type(), ConnectionType::Upgrade);
assert_eq!(req.head().connection_type(), ConnectionType::KeepAlive);
assert!(req.upgrade());
assert!(!pl.is_unhandled());
}
#[test]
fn test_http_request_parser_utf8() {
let mut buf = BytesMut::from(
@ -1169,4 +1221,16 @@ mod tests {
let msg = pl.decode(&mut buf).unwrap().unwrap();
assert!(msg.eof());
}
#[test]
fn test_response_http10_read_until_eof() {
let mut buf = BytesMut::from(&"HTTP/1.0 200 Ok\r\n\r\ntest data"[..]);
let mut reader = MessageDecoder::<ResponseHead>::default();
let (_msg, pl) = reader.decode(&mut buf).unwrap().unwrap();
let mut pl = pl.unwrap();
let chunk = pl.decode(&mut buf).unwrap().unwrap();
assert_eq!(chunk, PayloadItem::Chunk(Bytes::from_static(b"test data")));
}
}

File diff suppressed because it is too large


@ -1,22 +1,18 @@
#![allow(unused_imports, unused_variables, dead_code)]
use std::fmt::Write as FmtWrite;
use std::io::Write;
use std::marker::PhantomData;
use std::str::FromStr;
use std::{cmp, fmt, io, mem};
use std::ptr::copy_nonoverlapping;
use std::slice::from_raw_parts_mut;
use std::{cmp, io};
use bytes::{BufMut, Bytes, BytesMut};
use http::header::{
HeaderValue, ACCEPT_ENCODING, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
};
use http::{HeaderMap, Method, StatusCode, Version};
use bytes::{BufMut, BytesMut};
use crate::body::BodySize;
use crate::config::ServiceConfig;
use crate::header::ContentEncoding;
use crate::header::map;
use crate::helpers;
use crate::message::{ConnectionType, Head, RequestHead, ResponseHead};
use crate::request::Request;
use crate::http::header::{CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
use crate::http::{HeaderMap, StatusCode, Version};
use crate::message::{ConnectionType, RequestHeadType};
use crate::response::Response;
const AVERAGE_HEADER_SIZE: usize = 30;
@ -41,10 +37,14 @@ impl<T: MessageType> Default for MessageEncoder<T> {
pub(crate) trait MessageType: Sized {
fn status(&self) -> Option<StatusCode>;
// fn connection_type(&self) -> Option<ConnectionType>;
fn headers(&self) -> &HeaderMap;
fn extra_headers(&self) -> Option<&HeaderMap>;
fn camel_case(&self) -> bool {
false
}
fn chunked(&self) -> bool;
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()>;
@ -59,6 +59,7 @@ pub(crate) trait MessageType: Sized {
) -> io::Result<()> {
let chunked = self.chunked();
let mut skip_len = length != BodySize::Stream;
let camel_case = self.camel_case();
// Content length
if let Some(status) = self.status() {
@ -76,76 +77,185 @@ pub(crate) trait MessageType: Sized {
match length {
BodySize::Stream => {
if chunked {
dst.extend_from_slice(b"\r\ntransfer-encoding: chunked\r\n")
if camel_case {
dst.put_slice(b"\r\nTransfer-Encoding: chunked\r\n")
} else {
dst.put_slice(b"\r\ntransfer-encoding: chunked\r\n")
}
} else {
skip_len = false;
dst.extend_from_slice(b"\r\n");
dst.put_slice(b"\r\n");
}
}
BodySize::Empty => {
dst.extend_from_slice(b"\r\ncontent-length: 0\r\n");
if camel_case {
dst.put_slice(b"\r\nContent-Length: 0\r\n");
} else {
dst.put_slice(b"\r\ncontent-length: 0\r\n");
}
}
BodySize::Sized(len) => helpers::write_content_length(len, dst),
BodySize::Sized64(len) => {
dst.extend_from_slice(b"\r\ncontent-length: ");
write!(dst.writer(), "{}", len)?;
dst.extend_from_slice(b"\r\n");
}
BodySize::None => dst.extend_from_slice(b"\r\n"),
BodySize::None => dst.put_slice(b"\r\n"),
}
// Connection
match ctype {
ConnectionType::Upgrade => dst.extend_from_slice(b"connection: upgrade\r\n"),
ConnectionType::Upgrade => dst.put_slice(b"connection: upgrade\r\n"),
ConnectionType::KeepAlive if version < Version::HTTP_11 => {
dst.extend_from_slice(b"connection: keep-alive\r\n")
if camel_case {
dst.put_slice(b"Connection: keep-alive\r\n")
} else {
dst.put_slice(b"connection: keep-alive\r\n")
}
}
ConnectionType::Close if version >= Version::HTTP_11 => {
dst.extend_from_slice(b"connection: close\r\n")
if camel_case {
dst.put_slice(b"Connection: close\r\n")
} else {
dst.put_slice(b"connection: close\r\n")
}
}
_ => (),
}
// merging headers from head and extra headers. HeaderMap::new() does not allocate.
let empty_headers = HeaderMap::new();
let extra_headers = self.extra_headers().unwrap_or(&empty_headers);
let headers = self
.headers()
.inner
.iter()
.filter(|(name, _)| !extra_headers.contains_key(*name))
.chain(extra_headers.inner.iter());
// write headers
let mut pos = 0;
let mut has_date = false;
let mut remaining = dst.remaining_mut();
let mut buf = unsafe { &mut *(dst.bytes_mut() as *mut [u8]) };
for (key, value) in self.headers() {
let mut buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
let mut remaining = dst.capacity() - dst.len();
// tracks bytes written since the last buffer resize
// since buf is a raw pointer into the bytes container's storage but is written to without the
// container's knowledge, this is used to sync the container's cursor after data is written
let mut pos = 0;
for (key, value) in headers {
match *key {
CONNECTION => continue,
TRANSFER_ENCODING | CONTENT_LENGTH if skip_len => continue,
DATE => {
has_date = true;
}
DATE => has_date = true,
_ => (),
}
let v = value.as_ref();
let k = key.as_str().as_bytes();
let len = k.len() + v.len() + 4;
if len > remaining {
unsafe {
dst.advance_mut(pos);
let k_len = k.len();
match value {
map::Value::One(ref val) => {
let v = val.as_ref();
let v_len = v.len();
// key length + value length + colon + space + \r\n
let len = k_len + v_len + 4;
if len > remaining {
// not enough room in buffer for this header; reserve more space
// SAFETY: all the bytes written up to position "pos" are initialized
// the written byte count and pointer advancement are kept in sync
unsafe {
dst.advance_mut(pos);
}
pos = 0;
dst.reserve(len * 2);
remaining = dst.capacity() - dst.len();
// re-assign buf raw pointer since it's possible that the buffer was
// reallocated and/or resized
buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
}
// SAFETY: on each write, it is enough to ensure that the advancement of the
// cursor matches the number of bytes written
unsafe {
// use upper Camel-Case
if camel_case {
write_camel_case(k, from_raw_parts_mut(buf, k_len))
} else {
write_data(k, buf, k_len)
}
buf = buf.add(k_len);
write_data(b": ", buf, 2);
buf = buf.add(2);
write_data(v, buf, v_len);
buf = buf.add(v_len);
write_data(b"\r\n", buf, 2);
buf = buf.add(2);
}
pos += len;
remaining -= len;
}
pos = 0;
dst.reserve(len);
remaining = dst.remaining_mut();
unsafe {
buf = &mut *(dst.bytes_mut() as *mut _);
map::Value::Multi(ref vec) => {
for val in vec {
let v = val.as_ref();
let v_len = v.len();
let len = k_len + v_len + 4;
if len > remaining {
// SAFETY: all the bytes written up to position "pos" are initialized
// the written byte count and pointer advancement are kept in sync
unsafe {
dst.advance_mut(pos);
}
pos = 0;
dst.reserve(len * 2);
remaining = dst.capacity() - dst.len();
// re-assign buf raw pointer since it's possible that the buffer was
// reallocated and/or resized
buf = dst.bytes_mut().as_mut_ptr() as *mut u8;
}
// SAFETY: on each write, it is enough to ensure that the advancement of
// the cursor matches the number of bytes written
unsafe {
if camel_case {
write_camel_case(k, from_raw_parts_mut(buf, k_len));
} else {
write_data(k, buf, k_len);
}
buf = buf.add(k_len);
write_data(b": ", buf, 2);
buf = buf.add(2);
write_data(v, buf, v_len);
buf = buf.add(v_len);
write_data(b"\r\n", buf, 2);
buf = buf.add(2);
};
pos += len;
remaining -= len;
}
}
}
buf[pos..pos + k.len()].copy_from_slice(k);
pos += k.len();
buf[pos..pos + 2].copy_from_slice(b": ");
pos += 2;
buf[pos..pos + v.len()].copy_from_slice(v);
pos += v.len();
buf[pos..pos + 2].copy_from_slice(b"\r\n");
pos += 2;
remaining -= len;
}
// final cursor synchronization with the bytes container
//
// SAFETY: all the bytes written up to position "pos" are initialized
// the written byte count and pointer advancement are kept in sync
unsafe {
dst.advance_mut(pos);
}
@ -171,14 +281,14 @@ impl MessageType for Response<()> {
self.head().chunked()
}
//fn connection_type(&self) -> Option<ConnectionType> {
// self.head().ctype
//}
fn headers(&self) -> &HeaderMap {
&self.head().headers
}
fn extra_headers(&self) -> Option<&HeaderMap> {
None
}
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> {
let head = self.head();
let reason = head.reason().as_bytes();
@ -186,35 +296,51 @@ impl MessageType for Response<()> {
// status line
helpers::write_status_line(head.version, head.status.as_u16(), dst);
dst.extend_from_slice(reason);
dst.put_slice(reason);
Ok(())
}
}
impl MessageType for RequestHead {
impl MessageType for RequestHeadType {
fn status(&self) -> Option<StatusCode> {
None
}
fn chunked(&self) -> bool {
self.chunked()
self.as_ref().chunked()
}
fn camel_case(&self) -> bool {
self.as_ref().camel_case_headers()
}
fn headers(&self) -> &HeaderMap {
&self.headers
self.as_ref().headers()
}
fn extra_headers(&self) -> Option<&HeaderMap> {
self.extra_headers()
}
fn encode_status(&mut self, dst: &mut BytesMut) -> io::Result<()> {
let head = self.as_ref();
dst.reserve(256 + head.headers.len() * AVERAGE_HEADER_SIZE);
write!(
Writer(dst),
"{} {} {}",
self.method,
self.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"),
match self.version {
head.method,
head.uri.path_and_query().map(|u| u.as_str()).unwrap_or("/"),
match head.version {
Version::HTTP_09 => "HTTP/0.9",
Version::HTTP_10 => "HTTP/1.0",
Version::HTTP_11 => "HTTP/1.1",
Version::HTTP_2 => "HTTP/2.0",
Version::HTTP_3 => "HTTP/3.0",
_ =>
return Err(io::Error::new(
io::ErrorKind::Other,
"unsupported version"
)),
}
)
.map_err(|e| io::Error::new(io::ErrorKind::Other, e))
@ -247,8 +373,7 @@ impl<T: MessageType> MessageEncoder<T> {
if !head {
self.te = match length {
BodySize::Empty => TransferEncoding::empty(),
BodySize::Sized(len) => TransferEncoding::length(len as u64),
BodySize::Sized64(len) => TransferEncoding::length(len),
BodySize::Sized(len) => TransferEncoding::length(len),
BodySize::Stream => {
if message.chunked() && !stream {
TransferEncoding::chunked()
@ -396,10 +521,51 @@ impl<'a> io::Write for Writer<'a> {
}
}
/// # Safety
/// Callers must ensure that the given length matches given value length.
unsafe fn write_data(value: &[u8], buf: *mut u8, len: usize) {
debug_assert_eq!(value.len(), len);
copy_nonoverlapping(value.as_ptr(), buf, len);
}
fn write_camel_case(value: &[u8], buffer: &mut [u8]) {
let mut index = 0;
let key = value;
let mut key_iter = key.iter();
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
} else {
return;
}
while let Some(c) = key_iter.next() {
buffer[index] = *c;
index += 1;
if *c == b'-' {
if let Some(c) = key_iter.next() {
if *c >= b'a' && *c <= b'z' {
buffer[index] = *c ^ b' ';
index += 1;
}
}
}
}
}
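// Sketch of the transformation above: the first byte and the byte after each
// '-' are upper-cased by flipping the ASCII case bit (b' ').
fn camel_case_sketch() {
    let mut out = [0u8; 14];
    write_camel_case(b"content-length", &mut out);
    assert_eq!(&out[..], &b"Content-Length"[..]);
}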
#[cfg(test)]
mod tests {
use super::*;
use std::rc::Rc;
use bytes::Bytes;
use http::header::AUTHORIZATION;
use super::*;
use crate::http::header::{HeaderValue, CONTENT_TYPE};
use crate::RequestHead;
#[test]
fn test_chunked_te() {
@ -410,8 +576,104 @@ mod tests {
assert!(enc.encode(b"", &mut bytes).ok().unwrap());
}
assert_eq!(
bytes.take().freeze(),
bytes.split().freeze(),
Bytes::from_static(b"4\r\ntest\r\n0\r\n\r\n")
);
}
#[test]
fn test_camel_case() {
let mut bytes = BytesMut::with_capacity(2048);
let mut head = RequestHead::default();
head.set_camel_case_headers(true);
head.headers.insert(DATE, HeaderValue::from_static("date"));
head.headers
.insert(CONTENT_TYPE, HeaderValue::from_static("plain/text"));
let mut head = RequestHeadType::Owned(head);
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Empty,
ConnectionType::Close,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("Content-Length: 0\r\n"));
assert!(data.contains("Connection: close\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n"));
assert!(data.contains("Date: date\r\n"));
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Stream,
ConnectionType::KeepAlive,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("Transfer-Encoding: chunked\r\n"));
assert!(data.contains("Content-Type: plain/text\r\n"));
assert!(data.contains("Date: date\r\n"));
let mut head = RequestHead::default();
head.set_camel_case_headers(false);
head.headers.insert(DATE, HeaderValue::from_static("date"));
head.headers
.insert(CONTENT_TYPE, HeaderValue::from_static("plain/text"));
head.headers
.append(CONTENT_TYPE, HeaderValue::from_static("xml"));
let mut head = RequestHeadType::Owned(head);
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Stream,
ConnectionType::KeepAlive,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("transfer-encoding: chunked\r\n"));
assert!(data.contains("content-type: xml\r\n"));
assert!(data.contains("content-type: plain/text\r\n"));
assert!(data.contains("date: date\r\n"));
}
#[test]
fn test_extra_headers() {
let mut bytes = BytesMut::with_capacity(2048);
let mut head = RequestHead::default();
head.headers.insert(
AUTHORIZATION,
HeaderValue::from_static("some authorization"),
);
let mut extra_headers = HeaderMap::new();
extra_headers.insert(
AUTHORIZATION,
HeaderValue::from_static("another authorization"),
);
extra_headers.insert(DATE, HeaderValue::from_static("date"));
let mut head = RequestHeadType::Rc(Rc::new(head), Some(extra_headers));
let _ = head.encode_headers(
&mut bytes,
Version::HTTP_11,
BodySize::Empty,
ConnectionType::Close,
&ServiceConfig::default(),
);
let data =
String::from_utf8(Vec::from(bytes.split().freeze().as_ref())).unwrap();
assert!(data.contains("content-length: 0\r\n"));
assert!(data.contains("connection: close\r\n"));
assert!(data.contains("authorization: another authorization\r\n"));
assert!(data.contains("date: date\r\n"));
}
}


@ -0,0 +1,38 @@
use std::task::{Context, Poll};
use actix_service::{Service, ServiceFactory};
use futures_util::future::{ok, Ready};
use crate::error::Error;
use crate::request::Request;
pub struct ExpectHandler;
impl ServiceFactory for ExpectHandler {
type Config = ();
type Request = Request;
type Response = Request;
type Error = Error;
type Service = ExpectHandler;
type InitError = Error;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
ok(ExpectHandler)
}
}
impl Service for ExpectHandler {
type Request = Request;
type Response = Request;
type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Request) -> Self::Future {
ok(req)
}
}
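// Hypothetical sketch of a custom expect service with the same shape as
// ExpectHandler: it runs before "100 Continue" is written, so returning an
// error rejects the request without its body being read. The type name and
// the use of ErrorBadRequest here are illustrative only.
use futures_util::future::err;

pub struct RejectExpectations;

impl Service for RejectExpectations {
    type Request = Request;
    type Response = Request;
    type Error = Error;
    type Future = Ready<Result<Self::Response, Self::Error>>;

    fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, _req: Request) -> Self::Future {
        err(crate::error::ErrorBadRequest("expectation rejected"))
    }
}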


@ -1,19 +1,25 @@
//! HTTP/1 implementation
use bytes::Bytes;
use bytes::{Bytes, BytesMut};
mod client;
mod codec;
mod decoder;
mod dispatcher;
mod encoder;
mod expect;
mod payload;
mod service;
mod upgrade;
mod utils;
pub use self::client::{ClientCodec, ClientPayloadCodec};
pub use self::codec::Codec;
pub use self::dispatcher::Dispatcher;
pub use self::payload::{Payload, PayloadBuffer};
pub use self::expect::ExpectHandler;
pub use self::payload::Payload;
pub use self::service::{H1Service, H1ServiceHandler, OneRequest};
pub use self::upgrade::UpgradeHandler;
pub use self::utils::SendResponse;
#[derive(Debug)]
/// Codec message
@ -38,6 +44,16 @@ pub enum MessageType {
Stream,
}
const LW: usize = 2 * 1024;
const HW: usize = 32 * 1024;
pub(crate) fn reserve_readbuf(src: &mut BytesMut) {
let cap = src.capacity();
if cap < LW {
src.reserve(HW - cap);
}
}
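// Sketch of the watermark behaviour above: when the read buffer's capacity has
// shrunk below LW (2KiB), enough space is reserved to bring it back up toward
// HW (32KiB) before the next read.
fn reserve_readbuf_sketch() {
    let mut buf = BytesMut::new();
    reserve_readbuf(&mut buf);
    assert!(buf.capacity() >= HW);
}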
#[cfg(test)]
mod tests {
use super::*;


@ -1,13 +1,13 @@
//! Payload stream
use std::cell::RefCell;
use std::cmp;
use std::collections::VecDeque;
use std::pin::Pin;
use std::rc::{Rc, Weak};
use std::task::{Context, Poll};
use bytes::{Bytes, BytesMut};
use futures::task::current as current_task;
use futures::task::Task;
use futures::{Async, Poll, Stream};
use actix_utils::task::LocalWaker;
use bytes::Bytes;
use futures_core::Stream;
use crate::error::PayloadError;
@ -15,7 +15,7 @@ use crate::error::PayloadError;
pub(crate) const MAX_BUFFER_SIZE: usize = 32_768;
#[derive(Debug, PartialEq)]
pub(crate) enum PayloadStatus {
pub enum PayloadStatus {
Read,
Pause,
Dropped,
@ -80,88 +80,61 @@ impl Payload {
}
#[inline]
/// Set read buffer capacity
///
/// Default buffer capacity is 32Kb.
pub fn set_read_buffer_capacity(&mut self, cap: usize) {
self.inner.borrow_mut().capacity = cap;
pub fn readany(
&mut self,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
self.inner.borrow_mut().readany(cx)
}
}
impl Stream for Payload {
type Item = Bytes;
type Error = PayloadError;
type Item = Result<Bytes, PayloadError>;
#[inline]
fn poll(&mut self) -> Poll<Option<Bytes>, PayloadError> {
self.inner.borrow_mut().readany()
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
self.inner.borrow_mut().readany(cx)
}
}
impl Clone for Payload {
fn clone(&self) -> Payload {
Payload {
inner: Rc::clone(&self.inner),
}
}
}
/// Payload writer interface.
pub(crate) trait PayloadWriter {
/// Set stream error.
fn set_error(&mut self, err: PayloadError);
/// Write eof into a stream which closes reading side of a stream.
fn feed_eof(&mut self);
/// Feed bytes into a payload stream
fn feed_data(&mut self, data: Bytes);
/// Need read data
fn need_read(&self) -> PayloadStatus;
}
/// Sender part of the payload stream
pub struct PayloadSender {
inner: Weak<RefCell<Inner>>,
}
impl PayloadWriter for PayloadSender {
impl PayloadSender {
#[inline]
fn set_error(&mut self, err: PayloadError) {
pub fn set_error(&mut self, err: PayloadError) {
if let Some(shared) = self.inner.upgrade() {
shared.borrow_mut().set_error(err)
}
}
#[inline]
fn feed_eof(&mut self) {
pub fn feed_eof(&mut self) {
if let Some(shared) = self.inner.upgrade() {
shared.borrow_mut().feed_eof()
}
}
#[inline]
fn feed_data(&mut self, data: Bytes) {
pub fn feed_data(&mut self, data: Bytes) {
if let Some(shared) = self.inner.upgrade() {
shared.borrow_mut().feed_data(data)
}
}
#[inline]
fn need_read(&self) -> PayloadStatus {
pub fn need_read(&self, cx: &mut Context<'_>) -> PayloadStatus {
// we check need_read only if Payload (other side) is alive,
// otherwise always return true (consume payload)
if let Some(shared) = self.inner.upgrade() {
if shared.borrow().need_read {
PayloadStatus::Read
} else {
#[cfg(not(test))]
{
if shared.borrow_mut().io_task.is_none() {
shared.borrow_mut().io_task = Some(current_task());
}
}
shared.borrow_mut().io_task.register(cx.waker());
PayloadStatus::Pause
}
} else {
@ -177,9 +150,8 @@ struct Inner {
err: Option<PayloadError>,
need_read: bool,
items: VecDeque<Bytes>,
capacity: usize,
task: Option<Task>,
io_task: Option<Task>,
task: LocalWaker,
io_task: LocalWaker,
}
impl Inner {
@ -190,9 +162,8 @@ impl Inner {
err: None,
items: VecDeque::new(),
need_read: true,
capacity: MAX_BUFFER_SIZE,
task: None,
io_task: None,
task: LocalWaker::new(),
io_task: LocalWaker::new(),
}
}
@ -210,9 +181,9 @@ impl Inner {
fn feed_data(&mut self, data: Bytes) {
self.len += data.len();
self.items.push_back(data);
self.need_read = self.len < self.capacity;
self.need_read = self.len < MAX_BUFFER_SIZE;
if let Some(task) = self.task.take() {
task.notify()
task.wake()
}
}
@ -221,34 +192,28 @@ impl Inner {
self.len
}
fn readany(&mut self) -> Poll<Option<Bytes>, PayloadError> {
fn readany(
&mut self,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, PayloadError>>> {
if let Some(data) = self.items.pop_front() {
self.len -= data.len();
self.need_read = self.len < self.capacity;
self.need_read = self.len < MAX_BUFFER_SIZE;
if self.need_read && self.task.is_none() && !self.eof {
self.task = Some(current_task());
if self.need_read && !self.eof {
self.task.register(cx.waker());
}
if let Some(task) = self.io_task.take() {
task.notify()
}
Ok(Async::Ready(Some(data)))
self.io_task.wake();
Poll::Ready(Some(Ok(data)))
} else if let Some(err) = self.err.take() {
Err(err)
Poll::Ready(Some(Err(err)))
} else if self.eof {
Ok(Async::Ready(None))
Poll::Ready(None)
} else {
self.need_read = true;
#[cfg(not(test))]
{
if self.task.is_none() {
self.task = Some(current_task());
}
if let Some(task) = self.io_task.take() {
task.notify()
}
}
Ok(Async::NotReady)
self.task.register(cx.waker());
self.io_task.wake();
Poll::Pending
}
}
@ -258,426 +223,22 @@ impl Inner {
}
}
/// Payload buffer
pub struct PayloadBuffer<S> {
len: usize,
items: VecDeque<Bytes>,
stream: S,
}
impl<S> PayloadBuffer<S>
where
S: Stream<Item = Bytes, Error = PayloadError>,
{
/// Create new `PayloadBuffer` instance
pub fn new(stream: S) -> Self {
PayloadBuffer {
len: 0,
items: VecDeque::new(),
stream,
}
}
/// Get mutable reference to an inner stream.
pub fn get_mut(&mut self) -> &mut S {
&mut self.stream
}
#[inline]
fn poll_stream(&mut self) -> Poll<bool, PayloadError> {
self.stream.poll().map(|res| match res {
Async::Ready(Some(data)) => {
self.len += data.len();
self.items.push_back(data);
Async::Ready(true)
}
Async::Ready(None) => Async::Ready(false),
Async::NotReady => Async::NotReady,
})
}
/// Read first available chunk of bytes
#[inline]
pub fn readany(&mut self) -> Poll<Option<Bytes>, PayloadError> {
if let Some(data) = self.items.pop_front() {
self.len -= data.len();
Ok(Async::Ready(Some(data)))
} else {
match self.poll_stream()? {
Async::Ready(true) => self.readany(),
Async::Ready(false) => Ok(Async::Ready(None)),
Async::NotReady => Ok(Async::NotReady),
}
}
}
/// Check if buffer contains enough bytes
#[inline]
pub fn can_read(&mut self, size: usize) -> Poll<Option<bool>, PayloadError> {
if size <= self.len {
Ok(Async::Ready(Some(true)))
} else {
match self.poll_stream()? {
Async::Ready(true) => self.can_read(size),
Async::Ready(false) => Ok(Async::Ready(None)),
Async::NotReady => Ok(Async::NotReady),
}
}
}
/// Return reference to the first chunk of data
#[inline]
pub fn get_chunk(&mut self) -> Poll<Option<&[u8]>, PayloadError> {
if self.items.is_empty() {
match self.poll_stream()? {
Async::Ready(true) => (),
Async::Ready(false) => return Ok(Async::Ready(None)),
Async::NotReady => return Ok(Async::NotReady),
}
}
match self.items.front().map(|c| c.as_ref()) {
Some(chunk) => Ok(Async::Ready(Some(chunk))),
None => Ok(Async::NotReady),
}
}
/// Read exact number of bytes
#[inline]
pub fn read_exact(&mut self, size: usize) -> Poll<Option<Bytes>, PayloadError> {
if size <= self.len {
self.len -= size;
let mut chunk = self.items.pop_front().unwrap();
if size < chunk.len() {
let buf = chunk.split_to(size);
self.items.push_front(chunk);
Ok(Async::Ready(Some(buf)))
} else if size == chunk.len() {
Ok(Async::Ready(Some(chunk)))
} else {
let mut buf = BytesMut::with_capacity(size);
buf.extend_from_slice(&chunk);
while buf.len() < size {
let mut chunk = self.items.pop_front().unwrap();
let rem = cmp::min(size - buf.len(), chunk.len());
buf.extend_from_slice(&chunk.split_to(rem));
if !chunk.is_empty() {
self.items.push_front(chunk);
}
}
Ok(Async::Ready(Some(buf.freeze())))
}
} else {
match self.poll_stream()? {
Async::Ready(true) => self.read_exact(size),
Async::Ready(false) => Ok(Async::Ready(None)),
Async::NotReady => Ok(Async::NotReady),
}
}
}
/// Remove specified amount of bytes from buffer
#[inline]
pub fn drop_bytes(&mut self, size: usize) {
if size <= self.len {
self.len -= size;
let mut len = 0;
while len < size {
let mut chunk = self.items.pop_front().unwrap();
let rem = cmp::min(size - len, chunk.len());
len += rem;
if rem < chunk.len() {
chunk.split_to(rem);
self.items.push_front(chunk);
}
}
}
}
/// Copy buffered data
pub fn copy(&mut self, size: usize) -> Poll<Option<BytesMut>, PayloadError> {
if size <= self.len {
let mut buf = BytesMut::with_capacity(size);
for chunk in &self.items {
if buf.len() < size {
let rem = cmp::min(size - buf.len(), chunk.len());
buf.extend_from_slice(&chunk[..rem]);
}
if buf.len() == size {
return Ok(Async::Ready(Some(buf)));
}
}
}
match self.poll_stream()? {
Async::Ready(true) => self.copy(size),
Async::Ready(false) => Ok(Async::Ready(None)),
Async::NotReady => Ok(Async::NotReady),
}
}
/// Read until specified ending
pub fn read_until(&mut self, line: &[u8]) -> Poll<Option<Bytes>, PayloadError> {
let mut idx = 0;
let mut num = 0;
let mut offset = 0;
let mut found = false;
let mut length = 0;
for no in 0..self.items.len() {
{
let chunk = &self.items[no];
for (pos, ch) in chunk.iter().enumerate() {
if *ch == line[idx] {
idx += 1;
if idx == line.len() {
num = no;
offset = pos + 1;
length += pos + 1;
found = true;
break;
}
} else {
idx = 0
}
}
if !found {
length += chunk.len()
}
}
if found {
let mut buf = BytesMut::with_capacity(length);
if num > 0 {
for _ in 0..num {
buf.extend_from_slice(&self.items.pop_front().unwrap());
}
}
if offset > 0 {
let mut chunk = self.items.pop_front().unwrap();
buf.extend_from_slice(&chunk.split_to(offset));
if !chunk.is_empty() {
self.items.push_front(chunk)
}
}
self.len -= length;
return Ok(Async::Ready(Some(buf.freeze())));
}
}
match self.poll_stream()? {
Async::Ready(true) => self.read_until(line),
Async::Ready(false) => Ok(Async::Ready(None)),
Async::NotReady => Ok(Async::NotReady),
}
}
/// Read bytes until new line delimiter
pub fn readline(&mut self) -> Poll<Option<Bytes>, PayloadError> {
self.read_until(b"\n")
}
/// Put unprocessed data back to the buffer
pub fn unprocessed(&mut self, data: Bytes) {
self.len += data.len();
self.items.push_front(data);
}
/// Get remaining data from the buffer
pub fn remaining(&mut self) -> Bytes {
self.items
.iter_mut()
.fold(BytesMut::new(), |mut b, c| {
b.extend_from_slice(c);
b
})
.freeze()
}
}
#[cfg(test)]
mod tests {
use super::*;
use actix_rt::Runtime;
use futures::future::{lazy, result};
use futures_util::future::poll_fn;
#[test]
fn test_basic() {
Runtime::new()
.unwrap()
.block_on(lazy(|| {
let (_, payload) = Payload::create(false);
let mut payload = PayloadBuffer::new(payload);
#[actix_rt::test]
async fn test_unread_data() {
let (_, mut payload) = Payload::create(false);
assert_eq!(payload.len, 0);
assert_eq!(Async::NotReady, payload.readany().ok().unwrap());
payload.unread_data(Bytes::from("data"));
assert!(!payload.is_empty());
assert_eq!(payload.len(), 4);
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
}
#[test]
fn test_eof() {
Runtime::new()
.unwrap()
.block_on(lazy(|| {
let (mut sender, payload) = Payload::create(false);
let mut payload = PayloadBuffer::new(payload);
assert_eq!(Async::NotReady, payload.readany().ok().unwrap());
sender.feed_data(Bytes::from("data"));
sender.feed_eof();
assert_eq!(
Async::Ready(Some(Bytes::from("data"))),
payload.readany().ok().unwrap()
);
assert_eq!(payload.len, 0);
assert_eq!(Async::Ready(None), payload.readany().ok().unwrap());
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
}
#[test]
fn test_err() {
Runtime::new()
.unwrap()
.block_on(lazy(|| {
let (mut sender, payload) = Payload::create(false);
let mut payload = PayloadBuffer::new(payload);
assert_eq!(Async::NotReady, payload.readany().ok().unwrap());
sender.set_error(PayloadError::Incomplete(None));
payload.readany().err().unwrap();
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
}
#[test]
fn test_readany() {
Runtime::new()
.unwrap()
.block_on(lazy(|| {
let (mut sender, payload) = Payload::create(false);
let mut payload = PayloadBuffer::new(payload);
sender.feed_data(Bytes::from("line1"));
sender.feed_data(Bytes::from("line2"));
assert_eq!(
Async::Ready(Some(Bytes::from("line1"))),
payload.readany().ok().unwrap()
);
assert_eq!(payload.len, 0);
assert_eq!(
Async::Ready(Some(Bytes::from("line2"))),
payload.readany().ok().unwrap()
);
assert_eq!(payload.len, 0);
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
}
#[test]
fn test_readexactly() {
Runtime::new()
.unwrap()
.block_on(lazy(|| {
let (mut sender, payload) = Payload::create(false);
let mut payload = PayloadBuffer::new(payload);
assert_eq!(Async::NotReady, payload.read_exact(2).ok().unwrap());
sender.feed_data(Bytes::from("line1"));
sender.feed_data(Bytes::from("line2"));
assert_eq!(
Async::Ready(Some(Bytes::from_static(b"li"))),
payload.read_exact(2).ok().unwrap()
);
assert_eq!(payload.len, 3);
assert_eq!(
Async::Ready(Some(Bytes::from_static(b"ne1l"))),
payload.read_exact(4).ok().unwrap()
);
assert_eq!(payload.len, 4);
sender.set_error(PayloadError::Incomplete(None));
payload.read_exact(10).err().unwrap();
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
}
#[test]
fn test_readuntil() {
Runtime::new()
.unwrap()
.block_on(lazy(|| {
let (mut sender, payload) = Payload::create(false);
let mut payload = PayloadBuffer::new(payload);
assert_eq!(Async::NotReady, payload.read_until(b"ne").ok().unwrap());
sender.feed_data(Bytes::from("line1"));
sender.feed_data(Bytes::from("line2"));
assert_eq!(
Async::Ready(Some(Bytes::from("line"))),
payload.read_until(b"ne").ok().unwrap()
);
assert_eq!(payload.len, 1);
assert_eq!(
Async::Ready(Some(Bytes::from("1line2"))),
payload.read_until(b"2").ok().unwrap()
);
assert_eq!(payload.len, 0);
sender.set_error(PayloadError::Incomplete(None));
payload.read_until(b"b").err().unwrap();
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
}
#[test]
fn test_unread_data() {
Runtime::new()
.unwrap()
.block_on(lazy(|| {
let (_, mut payload) = Payload::create(false);
payload.unread_data(Bytes::from("data"));
assert!(!payload.is_empty());
assert_eq!(payload.len(), 4);
assert_eq!(
Async::Ready(Some(Bytes::from("data"))),
payload.poll().ok().unwrap()
);
let res: Result<(), ()> = Ok(());
result(res)
}))
.unwrap();
assert_eq!(
Bytes::from("data"),
poll_fn(|cx| payload.readany(cx)).await.unwrap().unwrap()
);
}
}
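
The rewritten test above drives the payload with `poll_fn`; because `Payload` now implements the std `Stream` trait with `Item = Result<Bytes, PayloadError>`, it can just as well be consumed with `StreamExt`. A minimal sketch, assuming `Payload::create` and the `PayloadSender` methods remain public as shown:

use actix_http::h1::Payload;
use bytes::Bytes;
use futures_util::StreamExt;

#[actix_rt::main]
async fn main() {
    let (mut sender, mut payload) = Payload::create(false);

    sender.feed_data(Bytes::from_static(b"chunk one"));
    sender.feed_data(Bytes::from_static(b"chunk two"));
    sender.feed_eof();

    // the stream yields each buffered chunk, then terminates after eof
    while let Some(chunk) = payload.next().await {
        println!("{:?}", chunk.expect("payload error"));
    }
}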


@ -1,81 +1,279 @@
use std::fmt::Debug;
use std::future::Future;
use std::marker::PhantomData;
use std::pin::Pin;
use std::rc::Rc;
use std::task::{Context, Poll};
use std::{fmt, net};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_server_config::{Io, ServerConfig as SrvConfig};
use actix_service::{IntoNewService, NewService, Service};
use actix_utils::cloneable::CloneableService;
use futures::future::{ok, FutureResult};
use futures::{try_ready, Async, Future, IntoFuture, Poll, Stream};
use actix_rt::net::TcpStream;
use actix_service::{pipeline_factory, IntoServiceFactory, Service, ServiceFactory};
use futures_core::ready;
use futures_util::future::{ok, Ready};
use crate::body::MessageBody;
use crate::config::{KeepAlive, ServiceConfig};
use crate::error::{DispatchError, ParseError};
use crate::cloneable::CloneableService;
use crate::config::ServiceConfig;
use crate::error::{DispatchError, Error, ParseError};
use crate::helpers::DataFactory;
use crate::request::Request;
use crate::response::Response;
use super::codec::Codec;
use super::dispatcher::Dispatcher;
use super::Message;
use super::{ExpectHandler, Message, UpgradeHandler};
/// `NewService` implementation for HTTP1 transport
pub struct H1Service<T, P, S, B> {
/// `ServiceFactory` implementation for HTTP1 transport
pub struct H1Service<T, S, B, X = ExpectHandler, U = UpgradeHandler<T>> {
srv: S,
cfg: ServiceConfig,
_t: PhantomData<(T, P, B)>,
expect: X,
upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
_t: PhantomData<(T, B)>,
}
impl<T, P, S, B> H1Service<T, P, S, B>
impl<T, S, B> H1Service<T, S, B>
where
S: NewService<SrvConfig, Request = Request>,
S::Error: Debug,
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
S::Service: 'static,
B: MessageBody,
{
/// Create new `HttpService` instance with default config.
pub fn new<F: IntoNewService<S, SrvConfig>>(service: F) -> Self {
let cfg = ServiceConfig::new(KeepAlive::Timeout(5), 5000, 0);
H1Service {
cfg,
srv: service.into_new_service(),
_t: PhantomData,
}
}
/// Create new `HttpService` instance with config.
pub fn with_config<F: IntoNewService<S, SrvConfig>>(
pub(crate) fn with_config<F: IntoServiceFactory<S>>(
cfg: ServiceConfig,
service: F,
) -> Self {
H1Service {
cfg,
srv: service.into_new_service(),
srv: service.into_factory(),
expect: ExpectHandler,
upgrade: None,
on_connect: None,
_t: PhantomData,
}
}
}
impl<T, P, S, B> NewService<SrvConfig> for H1Service<T, P, S, B>
impl<S, B, X, U> H1Service<TcpStream, S, B, X, U>
where
T: AsyncRead + AsyncWrite,
S: NewService<SrvConfig, Request = Request>,
S::Error: Debug,
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
S::Service: 'static,
B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: ServiceFactory<
Config = (),
Request = (Request, Framed<TcpStream, Codec>),
Response = (),
>,
U::Error: fmt::Display + Into<Error>,
U::InitError: fmt::Debug,
{
/// Create simple tcp stream service
pub fn tcp(
self,
) -> impl ServiceFactory<
Config = (),
Request = TcpStream,
Response = (),
Error = DispatchError,
InitError = (),
> {
pipeline_factory(|io: TcpStream| {
let peer_addr = io.peer_addr().ok();
ok((io, peer_addr))
})
.and_then(self)
}
}
#[cfg(feature = "openssl")]
mod openssl {
use super::*;
use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream};
use actix_tls::{openssl::HandshakeError, TlsError};
impl<S, B, X, U> H1Service<SslStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: ServiceFactory<
Config = (),
Request = (Request, Framed<SslStream<TcpStream>, Codec>),
Response = (),
>,
U::Error: fmt::Display + Into<Error>,
U::InitError: fmt::Debug,
{
/// Create openssl based service
pub fn openssl(
self,
acceptor: SslAcceptor,
) -> impl ServiceFactory<
Config = (),
Request = TcpStream,
Response = (),
Error = TlsError<HandshakeError<TcpStream>, DispatchError>,
InitError = (),
> {
pipeline_factory(
Acceptor::new(acceptor)
.map_err(TlsError::Tls)
.map_init_err(|_| panic!()),
)
.and_then(|io: SslStream<TcpStream>| {
let peer_addr = io.get_ref().peer_addr().ok();
ok((io, peer_addr))
})
.and_then(self.map_err(TlsError::Service))
}
}
}
#[cfg(feature = "rustls")]
mod rustls {
use super::*;
use actix_tls::rustls::{Acceptor, ServerConfig, TlsStream};
use actix_tls::TlsError;
use std::{fmt, io};
impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: ServiceFactory<
Config = (),
Request = (Request, Framed<TlsStream<TcpStream>, Codec>),
Response = (),
>,
U::Error: fmt::Display + Into<Error>,
U::InitError: fmt::Debug,
{
/// Create rustls based service
pub fn rustls(
self,
config: ServerConfig,
) -> impl ServiceFactory<
Config = (),
Request = TcpStream,
Response = (),
Error = TlsError<io::Error, DispatchError>,
InitError = (),
> {
pipeline_factory(
Acceptor::new(config)
.map_err(TlsError::Tls)
.map_init_err(|_| panic!()),
)
.and_then(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
ok((io, peer_addr))
})
.and_then(self.map_err(TlsError::Service))
}
}
}
impl<T, S, B, X, U> H1Service<T, S, B, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
{
type Request = Io<T, P>;
pub fn expect<X1>(self, expect: X1) -> H1Service<T, S, B, X1, U>
where
X1: ServiceFactory<Request = Request, Response = Request>,
X1::Error: Into<Error>,
X1::InitError: fmt::Debug,
{
H1Service {
expect,
cfg: self.cfg,
srv: self.srv,
upgrade: self.upgrade,
on_connect: self.on_connect,
_t: PhantomData,
}
}
pub fn upgrade<U1>(self, upgrade: Option<U1>) -> H1Service<T, S, B, X, U1>
where
U1: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
U1::Error: fmt::Display,
U1::InitError: fmt::Debug,
{
H1Service {
upgrade,
cfg: self.cfg,
srv: self.srv,
expect: self.expect,
on_connect: self.on_connect,
_t: PhantomData,
}
}
/// Set on connect callback.
pub(crate) fn on_connect(
mut self,
f: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
) -> Self {
self.on_connect = f;
self
}
}
impl<T, S, B, X, U> ServiceFactory for H1Service<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: ServiceFactory<Config = (), Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display + Into<Error>,
U::InitError: fmt::Debug,
{
type Config = ();
type Request = (T, Option<net::SocketAddr>);
type Response = ();
type Error = DispatchError;
type InitError = S::InitError;
type Service = H1ServiceHandler<T, P, S::Service, B>;
type Future = H1ServiceResponse<T, P, S, B>;
type InitError = ();
type Service = H1ServiceHandler<T, S::Service, B, X::Service, U::Service>;
type Future = H1ServiceResponse<T, S, B, X, U>;
fn new_service(&self, cfg: &SrvConfig) -> Self::Future {
fn new_service(&self, _: ()) -> Self::Future {
H1ServiceResponse {
fut: self.srv.new_service(cfg).into_future(),
fut: self.srv.new_service(()),
fut_ex: Some(self.expect.new_service(())),
fut_upg: self.upgrade.as_ref().map(|f| f.new_service(())),
expect: None,
upgrade: None,
on_connect: self.on_connect.clone(),
cfg: Some(self.cfg.clone()),
_t: PhantomData,
}
@ -83,91 +281,215 @@ where
}
#[doc(hidden)]
pub struct H1ServiceResponse<T, P, S: NewService<SrvConfig, Request = Request>, B> {
fut: <S::Future as IntoFuture>::Future,
#[pin_project::pin_project]
pub struct H1ServiceResponse<T, S, B, X, U>
where
S: ServiceFactory<Request = Request>,
S::Error: Into<Error>,
S::InitError: fmt::Debug,
X: ServiceFactory<Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
#[pin]
fut: S::Future,
#[pin]
fut_ex: Option<X::Future>,
#[pin]
fut_upg: Option<U::Future>,
expect: Option<X::Service>,
upgrade: Option<U::Service>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
cfg: Option<ServiceConfig>,
_t: PhantomData<(T, P, B)>,
_t: PhantomData<(T, B)>,
}
impl<T, P, S, B> Future for H1ServiceResponse<T, P, S, B>
impl<T, S, B, X, U> Future for H1ServiceResponse<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite,
S: NewService<SrvConfig, Request = Request>,
S::Service: 'static,
S::Error: Debug,
T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
X: ServiceFactory<Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
U: ServiceFactory<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
{
type Item = H1ServiceHandler<T, P, S::Service, B>;
type Error = S::InitError;
type Output = Result<H1ServiceHandler<T, S::Service, B, X::Service, U::Service>, ()>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let service = try_ready!(self.fut.poll());
Ok(Async::Ready(H1ServiceHandler::new(
self.cfg.take().unwrap(),
service,
)))
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
if let Some(fut) = this.fut_ex.as_pin_mut() {
let expect = ready!(fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)))?;
this = self.as_mut().project();
*this.expect = Some(expect);
this.fut_ex.set(None);
}
if let Some(fut) = this.fut_upg.as_pin_mut() {
let upgrade = ready!(fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)))?;
this = self.as_mut().project();
*this.upgrade = Some(upgrade);
this.fut_ex.set(None);
}
let result = ready!(this
.fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)));
Poll::Ready(result.map(|service| {
let this = self.as_mut().project();
H1ServiceHandler::new(
this.cfg.take().unwrap(),
service,
this.expect.take().unwrap(),
this.upgrade.take(),
this.on_connect.clone(),
)
}))
}
}
/// `Service` implementation for HTTP1 transport
pub struct H1ServiceHandler<T, P, S: 'static, B> {
pub struct H1ServiceHandler<T, S: Service, B, X: Service, U: Service> {
srv: CloneableService<S>,
expect: CloneableService<X>,
upgrade: Option<CloneableService<U>>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
cfg: ServiceConfig,
_t: PhantomData<(T, P, B)>,
_t: PhantomData<(T, B)>,
}
impl<T, P, S, B> H1ServiceHandler<T, P, S, B>
impl<T, S, B, X, U> H1ServiceHandler<T, S, B, X, U>
where
S: Service<Request = Request>,
S::Error: Debug,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
fn new(cfg: ServiceConfig, srv: S) -> H1ServiceHandler<T, P, S, B> {
fn new(
cfg: ServiceConfig,
srv: S,
expect: X,
upgrade: Option<U>,
on_connect: Option<Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
) -> H1ServiceHandler<T, S, B, X, U> {
H1ServiceHandler {
srv: CloneableService::new(srv),
expect: CloneableService::new(expect),
upgrade: upgrade.map(CloneableService::new),
cfg,
on_connect,
_t: PhantomData,
}
}
}
impl<T, P, S, B> Service for H1ServiceHandler<T, P, S, B>
impl<T, S, B, X, U> Service for H1ServiceHandler<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite,
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Debug,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display + Into<Error>,
{
type Request = Io<T, P>;
type Request = (T, Option<net::SocketAddr>);
type Response = ();
type Error = DispatchError;
type Future = Dispatcher<T, S, B>;
type Future = Dispatcher<T, S, B, X, U>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.srv.poll_ready().map_err(|e| {
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service
})
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let ready = self
.expect
.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready();
let ready = self
.srv
.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready()
&& ready;
let ready = if let Some(ref mut upg) = self.upgrade {
upg.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready()
&& ready
} else {
ready
};
if ready {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn call(&mut self, req: Self::Request) -> Self::Future {
Dispatcher::new(req.into_parts().0, self.cfg.clone(), self.srv.clone())
fn call(&mut self, (io, addr): Self::Request) -> Self::Future {
let on_connect = if let Some(ref on_connect) = self.on_connect {
Some(on_connect(&io))
} else {
None
};
Dispatcher::new(
io,
self.cfg.clone(),
self.srv.clone(),
self.expect.clone(),
self.upgrade.clone(),
on_connect,
addr,
)
}
}
/// `NewService` implementation for `OneRequestService` service
/// `ServiceFactory` implementation for `OneRequestService` service
#[derive(Default)]
pub struct OneRequest<T, P> {
pub struct OneRequest<T> {
config: ServiceConfig,
_t: PhantomData<(T, P)>,
_t: PhantomData<T>,
}
impl<T, P> OneRequest<T, P>
impl<T> OneRequest<T>
where
T: AsyncRead + AsyncWrite,
T: AsyncRead + AsyncWrite + Unpin,
{
/// Create new `OneRequest` instance.
pub fn new() -> Self {
@ -178,80 +500,82 @@ where
}
}
impl<T, P> NewService<SrvConfig> for OneRequest<T, P>
impl<T> ServiceFactory for OneRequest<T>
where
T: AsyncRead + AsyncWrite,
T: AsyncRead + AsyncWrite + Unpin,
{
type Request = Io<T, P>;
type Config = ();
type Request = T;
type Response = (Request, Framed<T, Codec>);
type Error = ParseError;
type InitError = ();
type Service = OneRequestService<T, P>;
type Future = FutureResult<Self::Service, Self::InitError>;
type Service = OneRequestService<T>;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: &SrvConfig) -> Self::Future {
fn new_service(&self, _: ()) -> Self::Future {
ok(OneRequestService {
config: self.config.clone(),
_t: PhantomData,
config: self.config.clone(),
})
}
}
/// `Service` implementation for HTTP1 transport. Reads one request and returns
/// request and framed object.
pub struct OneRequestService<T, P> {
pub struct OneRequestService<T> {
_t: PhantomData<T>,
config: ServiceConfig,
_t: PhantomData<(T, P)>,
}
impl<T, P> Service for OneRequestService<T, P>
impl<T> Service for OneRequestService<T>
where
T: AsyncRead + AsyncWrite,
T: AsyncRead + AsyncWrite + Unpin,
{
type Request = Io<T, P>;
type Request = T;
type Response = (Request, Framed<T, Codec>);
type Error = ParseError;
type Future = OneRequestServiceResponse<T>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, req: Self::Request) -> Self::Future {
OneRequestServiceResponse {
framed: Some(Framed::new(
req.into_parts().0,
Codec::new(self.config.clone()),
)),
framed: Some(Framed::new(req, Codec::new(self.config.clone()))),
}
}
}
#[doc(hidden)]
#[pin_project::pin_project]
pub struct OneRequestServiceResponse<T>
where
T: AsyncRead + AsyncWrite,
T: AsyncRead + AsyncWrite + Unpin,
{
#[pin]
framed: Option<Framed<T, Codec>>,
}
impl<T> Future for OneRequestServiceResponse<T>
where
T: AsyncRead + AsyncWrite,
T: AsyncRead + AsyncWrite + Unpin,
{
type Item = (Request, Framed<T, Codec>);
type Error = ParseError;
type Output = Result<(Request, Framed<T, Codec>), ParseError>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.framed.as_mut().unwrap().poll()? {
Async::Ready(Some(req)) => match req {
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project();
match ready!(this.framed.as_pin_mut().unwrap().next_item(cx)) {
Some(Ok(req)) => match req {
Message::Item(req) => {
Ok(Async::Ready((req, self.framed.take().unwrap())))
let mut this = self.as_mut().project();
Poll::Ready(Ok((req, this.framed.take().unwrap())))
}
Message::Chunk(_) => unreachable!("Something is wrong"),
},
Async::Ready(None) => Err(ParseError::Incomplete),
Async::NotReady => Ok(Async::NotReady),
Some(Err(err)) => Poll::Ready(Err(err)),
None => Poll::Ready(Err(ParseError::Incomplete)),
}
}
}
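
For orientation, the public route to the `H1Service` machinery above is the `HttpService` builder. A hedged end-to-end sketch; the bind name, address and trivial handler are placeholders, and `actix-server` 1.x plus `actix-rt` are assumed:

use actix_http::{Error, HttpService, Request, Response};
use actix_server::Server;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    Server::build()
        .bind("h1-demo", "127.0.0.1:8080", || {
            HttpService::build()
                // these feed the same ServiceConfig that H1Service::with_config receives
                .client_timeout(1000)
                .client_disconnect(1000)
                .h1(|_req: Request| async {
                    Ok::<_, Error>(Response::Ok().body("hello from h1"))
                })
                // plain TCP acceptor, as provided by H1Service::tcp above
                .tcp()
        })?
        .run()
        .await
}

The builder's `expect(...)` and `upgrade(...)` hooks correspond to the `expect`/`upgrade` setters on `H1Service` above.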


@ -0,0 +1,41 @@
use std::marker::PhantomData;
use std::task::{Context, Poll};
use actix_codec::Framed;
use actix_service::{Service, ServiceFactory};
use futures_util::future::Ready;
use crate::error::Error;
use crate::h1::Codec;
use crate::request::Request;
pub struct UpgradeHandler<T>(PhantomData<T>);
impl<T> ServiceFactory for UpgradeHandler<T> {
type Config = ();
type Request = (Request, Framed<T, Codec>);
type Response = ();
type Error = Error;
type Service = UpgradeHandler<T>;
type InitError = Error;
type Future = Ready<Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
unimplemented!()
}
}
impl<T> Service for UpgradeHandler<T> {
type Request = (Request, Framed<T, Codec>);
type Response = ();
type Error = Error;
type Future = Ready<Result<Self::Response, Self::Error>>;
fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
Poll::Ready(Ok(()))
}
fn call(&mut self, _: Self::Request) -> Self::Future {
unimplemented!()
}
}

actix-http/src/h1/utils.rs

@ -0,0 +1,115 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use crate::body::{BodySize, MessageBody, ResponseBody};
use crate::error::Error;
use crate::h1::{Codec, Message};
use crate::response::Response;
/// Send HTTP/1 response
#[pin_project::pin_project]
pub struct SendResponse<T, B> {
res: Option<Message<(Response<()>, BodySize)>>,
#[pin]
body: Option<ResponseBody<B>>,
#[pin]
framed: Option<Framed<T, Codec>>,
}
impl<T, B> SendResponse<T, B>
where
B: MessageBody,
{
pub fn new(framed: Framed<T, Codec>, response: Response<B>) -> Self {
let (res, body) = response.into_parts();
SendResponse {
res: Some((res, body.size()).into()),
body: Some(body),
framed: Some(framed),
}
}
}
impl<T, B> Future for SendResponse<T, B>
where
T: AsyncRead + AsyncWrite + Unpin,
B: MessageBody + Unpin,
{
type Output = Result<Framed<T, Codec>, Error>;
// TODO: rethink if we need loops in polls
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
let mut body_done = this.body.is_none();
loop {
let mut body_ready = !body_done;
// send body
if this.res.is_none() && body_ready {
while body_ready
&& !body_done
&& !this
.framed
.as_ref()
.as_pin_ref()
.unwrap()
.is_write_buf_full()
{
match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx)? {
Poll::Ready(item) => {
// body is done when item is None
body_done = item.is_none();
if body_done {
let _ = this.body.take();
}
let framed = this.framed.as_mut().as_pin_mut().unwrap();
framed.write(Message::Chunk(item))?;
}
Poll::Pending => body_ready = false,
}
}
}
let framed = this.framed.as_mut().as_pin_mut().unwrap();
// flush write buffer
if !framed.is_write_buf_empty() {
match framed.flush(cx)? {
Poll::Ready(_) => {
if body_ready {
continue;
} else {
return Poll::Pending;
}
}
Poll::Pending => return Poll::Pending,
}
}
// send response
if let Some(res) = this.res.take() {
framed.write(res)?;
continue;
}
if !body_done {
if body_ready {
continue;
} else {
return Poll::Pending;
}
} else {
break;
}
}
let framed = this.framed.take().unwrap();
Poll::Ready(Ok(framed))
}
}
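
`SendResponse` pairs naturally with the `OneRequest` service from `service.rs`: read a single request off a raw connection, answer it by hand, and keep the `Framed` transport for whatever comes next (upgrades, tests). A sketch with error handling elided; the already-connected `TcpStream` is assumed to come from elsewhere:

use actix_http::h1::{OneRequest, SendResponse};
use actix_http::Response;
use actix_rt::net::TcpStream;
use actix_service::{Service, ServiceFactory};

async fn answer_one_request(io: TcpStream) {
    // build the one-shot service and read exactly one request off the socket
    let mut one = OneRequest::new().new_service(()).await.unwrap();
    let (req, framed) = one.call(io).await.expect("parse error");
    println!("{} {}", req.method(), req.path());

    // write a hand-rolled response over the same framed transport
    let _framed = SendResponse::new(framed, Response::Ok().body("bye"))
        .await
        .expect("send error");
}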

View File

@ -1,26 +1,25 @@
use std::collections::VecDeque;
use std::convert::TryFrom;
use std::future::Future;
use std::marker::PhantomData;
use std::time::Instant;
use std::{fmt, mem};
use std::net;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::time::{Delay, Instant};
use actix_service::Service;
use actix_utils::cloneable::CloneableService;
use bitflags::bitflags;
use bytes::{Bytes, BytesMut};
use futures::{try_ready, Async, Future, Poll, Sink, Stream};
use h2::server::{Connection, SendResponse};
use h2::{RecvStream, SendStream};
use http::header::{
HeaderValue, ACCEPT_ENCODING, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING,
};
use http::HttpTryFrom;
use log::{debug, error, trace};
use tokio_timer::Delay;
use h2::SendStream;
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, DATE, TRANSFER_ENCODING};
use log::{error, trace};
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use crate::body::{BodySize, MessageBody, ResponseBody};
use crate::cloneable::CloneableService;
use crate::config::ServiceConfig;
use crate::error::{DispatchError, Error, ParseError, PayloadError, ResponseError};
use crate::error::{DispatchError, Error};
use crate::helpers::DataFactory;
use crate::httpmessage::HttpMessage;
use crate::message::ResponseHead;
use crate::payload::Payload;
use crate::request::Request;
@ -29,14 +28,16 @@ use crate::response::Response;
const CHUNK_SIZE: usize = 16_384;
/// Dispatcher for HTTP/2 protocol
pub struct Dispatcher<
T: AsyncRead + AsyncWrite,
S: Service<Request = Request> + 'static,
B: MessageBody,
> {
#[pin_project::pin_project]
pub struct Dispatcher<T, S: Service<Request = Request>, B: MessageBody>
where
T: AsyncRead + AsyncWrite + Unpin,
{
service: CloneableService<S>,
connection: Connection<T, Bytes>,
on_connect: Option<Box<dyn DataFactory>>,
config: ServiceConfig,
peer_addr: Option<net::SocketAddr>,
ka_expire: Instant,
ka_timer: Option<Delay>,
_t: PhantomData<B>,
@ -44,17 +45,20 @@ pub struct Dispatcher<
impl<T, S, B> Dispatcher<T, S, B>
where
T: AsyncRead + AsyncWrite,
S: Service<Request = Request> + 'static,
S::Error: fmt::Debug,
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error>,
// S::Future: 'static,
S::Response: Into<Response<B>>,
B: MessageBody + 'static,
B: MessageBody,
{
pub fn new(
pub(crate) fn new(
service: CloneableService<S>,
connection: Connection<T, Bytes>,
on_connect: Option<Box<dyn DataFactory>>,
config: ServiceConfig,
timeout: Option<Delay>,
peer_addr: Option<net::SocketAddr>,
) -> Self {
// let keepalive = config.keep_alive_enabled();
// let flags = if keepalive {
@ -75,9 +79,11 @@ where
Dispatcher {
service,
config,
peer_addr,
connection,
on_connect,
ka_expire,
ka_timer,
connection,
_t: PhantomData,
}
}
@ -85,76 +91,100 @@ where
impl<T, S, B> Future for Dispatcher<T, S, B>
where
T: AsyncRead + AsyncWrite,
S: Service<Request = Request> + 'static,
S::Error: fmt::Debug,
S::Response: Into<Response<B>>,
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
{
type Item = ();
type Error = DispatchError;
type Output = Result<(), DispatchError>;
#[inline]
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
loop {
match self.connection.poll()? {
Async::Ready(None) => return Ok(Async::Ready(())),
Async::Ready(Some((req, res))) => {
match Pin::new(&mut this.connection).poll_accept(cx) {
Poll::Ready(None) => return Poll::Ready(Ok(())),
Poll::Ready(Some(Err(err))) => return Poll::Ready(Err(err.into())),
Poll::Ready(Some(Ok((req, res)))) => {
// update keep-alive expire
if self.ka_timer.is_some() {
if let Some(expire) = self.config.keep_alive_expire() {
self.ka_expire = expire;
if this.ka_timer.is_some() {
if let Some(expire) = this.config.keep_alive_expire() {
this.ka_expire = expire;
}
}
let (parts, body) = req.into_parts();
let mut req = Request::with_payload(body.into());
let mut req = Request::with_payload(Payload::<
crate::payload::PayloadStream,
>::H2(
crate::h2::Payload::new(body)
));
let head = &mut req.head_mut();
head.uri = parts.uri;
head.method = parts.method;
head.version = parts.version;
head.headers = parts.headers;
tokio_current_thread::spawn(ServiceResponse::<S, B> {
head.headers = parts.headers.into();
head.peer_addr = this.peer_addr;
// set on_connect data
if let Some(ref on_connect) = this.on_connect {
on_connect.set(&mut req.extensions_mut());
}
actix_rt::spawn(ServiceResponse::<
S::Future,
S::Response,
S::Error,
B,
> {
state: ServiceResponseState::ServiceCall(
self.service.call(req),
this.service.call(req),
Some(res),
),
config: self.config.clone(),
config: this.config.clone(),
buffer: None,
})
_t: PhantomData,
});
}
Async::NotReady => return Ok(Async::NotReady),
Poll::Pending => return Poll::Pending,
}
}
}
}
struct ServiceResponse<S: Service, B> {
state: ServiceResponseState<S, B>,
#[pin_project::pin_project]
struct ServiceResponse<F, I, E, B> {
#[pin]
state: ServiceResponseState<F, B>,
config: ServiceConfig,
buffer: Option<Bytes>,
_t: PhantomData<(I, E)>,
}
enum ServiceResponseState<S: Service, B> {
ServiceCall(S::Future, Option<SendResponse<Bytes>>),
SendPayload(SendStream<Bytes>, ResponseBody<B>),
#[pin_project::pin_project(project = ServiceResponseStateProj)]
enum ServiceResponseState<F, B> {
ServiceCall(#[pin] F, Option<SendResponse<Bytes>>),
SendPayload(SendStream<Bytes>, #[pin] ResponseBody<B>),
}
impl<S, B> ServiceResponse<S, B>
impl<F, I, E, B> ServiceResponse<F, I, E, B>
where
S: Service<Request = Request> + 'static,
S::Error: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody + 'static,
F: Future<Output = Result<I, E>>,
E: Into<Error>,
I: Into<Response<B>>,
B: MessageBody,
{
fn prepare_response(
&self,
head: &ResponseHead,
length: &mut BodySize,
size: &mut BodySize,
) -> http::Response<()> {
let mut has_date = false;
let mut skip_len = length != &BodySize::Stream;
let mut skip_len = size != &BodySize::Stream;
let mut res = http::Response::new(());
*res.status_mut() = head.status;
@ -164,14 +194,14 @@ where
match head.status {
http::StatusCode::NO_CONTENT
| http::StatusCode::CONTINUE
| http::StatusCode::PROCESSING => *length = BodySize::None,
| http::StatusCode::PROCESSING => *size = BodySize::None,
http::StatusCode::SWITCHING_PROTOCOLS => {
skip_len = true;
*length = BodySize::Stream;
*size = BodySize::Stream;
}
_ => (),
}
let _ = match length {
let _ = match size {
BodySize::None | BodySize::Stream => None,
BodySize::Empty => res
.headers_mut()
@ -180,10 +210,6 @@ where
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
BodySize::Sized64(len) => res.headers_mut().insert(
CONTENT_LENGTH,
HeaderValue::try_from(format!("{}", len)).unwrap(),
),
};
// copy headers
@ -201,124 +227,137 @@ where
if !has_date {
let mut bytes = BytesMut::with_capacity(29);
self.config.set_date_header(&mut bytes);
res.headers_mut()
.insert(DATE, HeaderValue::try_from(bytes.freeze()).unwrap());
res.headers_mut().insert(
DATE,
// SAFETY: serialized date-times are known ASCII strings
unsafe { HeaderValue::from_maybe_shared_unchecked(bytes.freeze()) },
);
}
res
}
}
impl<S, B> Future for ServiceResponse<S, B>
impl<F, I, E, B> Future for ServiceResponse<F, I, E, B>
where
S: Service<Request = Request> + 'static,
S::Error: fmt::Debug,
S::Response: Into<Response<B>>,
B: MessageBody + 'static,
F: Future<Output = Result<I, E>>,
E: Into<Error>,
I: Into<Response<B>>,
B: MessageBody,
{
type Item = ();
type Error = ();
type Output = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.state {
ServiceResponseState::ServiceCall(ref mut call, ref mut send) => {
match call.poll() {
Ok(Async::Ready(res)) => {
let (res, body) = res.into().replace_body(());
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
let mut send = send.take().unwrap();
let mut length = body.length();
let h2_res = self.prepare_response(res.head(), &mut length);
match this.state.project() {
ServiceResponseStateProj::ServiceCall(call, send) => match call.poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
let stream = send
.send_response(h2_res, length.is_eof())
.map_err(|e| {
trace!("Error sending h2 response: {:?}", e);
})?;
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res = self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
if length.is_eof() {
Ok(Async::Ready(()))
} else {
self.state = ServiceResponseState::SendPayload(stream, body);
self.poll()
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
this.state
.set(ServiceResponseState::SendPayload(stream, body));
self.poll(cx)
}
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(_e) => {
let res: Response = Response::InternalServerError().finish();
let (res, body) = res.replace_body(());
}
Poll::Pending => Poll::Pending,
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
let mut send = send.take().unwrap();
let mut length = body.length();
let h2_res = self.prepare_response(res.head(), &mut length);
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res = self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = send
.send_response(h2_res, length.is_eof())
.map_err(|e| {
trace!("Error sending h2 response: {:?}", e);
})?;
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if length.is_eof() {
Ok(Async::Ready(()))
if size.is_eof() {
Poll::Ready(())
} else {
this.state.set(ServiceResponseState::SendPayload(
stream,
body.into_body(),
));
self.poll(cx)
}
}
},
ServiceResponseStateProj::SendPayload(ref mut stream, ref mut body) => {
loop {
loop {
if let Some(ref mut buffer) = this.buffer {
match stream.poll_capacity(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => return Poll::Ready(()),
Poll::Ready(Some(Ok(cap))) => {
let len = buffer.len();
let bytes = buffer.split_to(std::cmp::min(cap, len));
if let Err(e) = stream.send_data(bytes, false) {
warn!("{:?}", e);
return Poll::Ready(());
} else if !buffer.is_empty() {
let cap =
std::cmp::min(buffer.len(), CHUNK_SIZE);
stream.reserve_capacity(cap);
} else {
this.buffer.take();
}
}
Poll::Ready(Some(Err(e))) => {
warn!("{:?}", e);
return Poll::Ready(());
}
}
} else {
self.state = ServiceResponseState::SendPayload(
stream,
body.into_body(),
);
self.poll()
match body.as_mut().poll_next(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => {
if let Err(e) = stream.send_data(Bytes::new(), true)
{
warn!("{:?}", e);
}
return Poll::Ready(());
}
Poll::Ready(Some(Ok(chunk))) => {
stream.reserve_capacity(std::cmp::min(
chunk.len(),
CHUNK_SIZE,
));
*this.buffer = Some(chunk);
}
Poll::Ready(Some(Err(e))) => {
error!("Response payload stream error: {:?}", e);
return Poll::Ready(());
}
}
}
}
}
}
ServiceResponseState::SendPayload(ref mut stream, ref mut body) => loop {
loop {
if let Some(ref mut buffer) = self.buffer {
match stream.poll_capacity().map_err(|e| warn!("{:?}", e))? {
Async::NotReady => return Ok(Async::NotReady),
Async::Ready(None) => return Ok(Async::Ready(())),
Async::Ready(Some(cap)) => {
let len = buffer.len();
let bytes = buffer.split_to(std::cmp::min(cap, len));
if let Err(e) = stream.send_data(bytes, false) {
warn!("{:?}", e);
return Err(());
} else if !buffer.is_empty() {
let cap = std::cmp::min(buffer.len(), CHUNK_SIZE);
stream.reserve_capacity(cap);
} else {
self.buffer.take();
}
}
}
} else {
match body.poll_next() {
Ok(Async::NotReady) => {
return Ok(Async::NotReady);
}
Ok(Async::Ready(None)) => {
if let Err(e) = stream.send_data(Bytes::new(), true) {
warn!("{:?}", e);
return Err(());
} else {
return Ok(Async::Ready(()));
}
}
Ok(Async::Ready(Some(chunk))) => {
stream.reserve_capacity(std::cmp::min(
chunk.len(),
CHUNK_SIZE,
));
self.buffer = Some(chunk);
}
Err(e) => {
error!("Response payload stream error: {:?}", e);
return Err(());
}
}
}
}
},
}
}
}
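
The `SendPayload` arm above interleaves `reserve_capacity`, `poll_capacity` and `send_data` so the peer's flow-control window is never overrun. The same pattern written directly against the `h2` crate (0.2.x API assumed), pulled out as a free async function:

use bytes::Bytes;
use futures_util::future::poll_fn;
use h2::SendStream;

const CHUNK_SIZE: usize = 16_384;

/// Push one response chunk through an HTTP/2 stream without overrunning
/// the window the peer has granted.
async fn send_chunk(stream: &mut SendStream<Bytes>, mut chunk: Bytes) -> Result<(), h2::Error> {
    while !chunk.is_empty() {
        // ask for up to CHUNK_SIZE, then wait until some of it is granted
        stream.reserve_capacity(chunk.len().min(CHUNK_SIZE));
        let granted = poll_fn(|cx| stream.poll_capacity(cx))
            .await
            .expect("stream reset")?;

        // send only what was granted; the loop retries the remainder
        let frame = chunk.split_to(granted.min(chunk.len()));
        stream.send_data(frame, false)?;
    }
    Ok(())
}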


@ -1,9 +1,9 @@
#![allow(dead_code, unused_imports)]
use std::fmt;
//! HTTP/2 implementation
use std::pin::Pin;
use std::task::{Context, Poll};
use bytes::Bytes;
use futures::{Async, Poll, Stream};
use futures_core::Stream;
use h2::RecvStream;
mod dispatcher;
@ -25,22 +25,26 @@ impl Payload {
}
impl Stream for Payload {
type Item = Bytes;
type Error = PayloadError;
type Item = Result<Bytes, PayloadError>;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
match self.pl.poll() {
Ok(Async::Ready(Some(chunk))) => {
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
let this = self.get_mut();
match Pin::new(&mut this.pl).poll_data(cx) {
Poll::Ready(Some(Ok(chunk))) => {
let len = chunk.len();
if let Err(err) = self.pl.release_capacity().release_capacity(len) {
Err(err.into())
if let Err(err) = this.pl.flow_control().release_capacity(len) {
Poll::Ready(Some(Err(err.into())))
} else {
Ok(Async::Ready(Some(chunk)))
Poll::Ready(Some(Ok(chunk)))
}
}
Ok(Async::Ready(None)) => Ok(Async::Ready(None)),
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(err) => Err(err.into()),
Poll::Ready(Some(Err(err))) => Poll::Ready(Some(Err(err.into()))),
Poll::Pending => Poll::Pending,
Poll::Ready(None) => Poll::Ready(None),
}
}
}
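
The `Payload` change above shows the general shape of the futures 0.1 to `std::future` migration running through this whole diff: `poll(&mut self)` returning `Async`/`Error` pairs becomes `poll_next(Pin<&mut Self>, &mut Context<'_>)`, and the error moves inside the item type. A minimal self-contained illustration (not actix code):

use std::collections::VecDeque;
use std::pin::Pin;
use std::task::{Context, Poll};

use bytes::Bytes;
use futures_core::Stream;

struct Chunks {
    items: VecDeque<Bytes>,
}

impl Stream for Chunks {
    // errors now travel inside the item instead of a separate `type Error`
    type Item = Result<Bytes, std::io::Error>;

    fn poll_next(self: Pin<&mut Self>, _cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        // `get_mut` is fine because `Chunks` is `Unpin`
        match self.get_mut().items.pop_front() {
            Some(chunk) => Poll::Ready(Some(Ok(chunk))),
            None => Poll::Ready(None),
        }
    }
}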


@ -1,192 +1,341 @@
use std::fmt::Debug;
use std::future::Future;
use std::marker::PhantomData;
use std::{io, net};
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{net, rc};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_server_config::{Io, ServerConfig as SrvConfig};
use actix_service::{IntoNewService, NewService, Service};
use actix_utils::cloneable::CloneableService;
use actix_codec::{AsyncRead, AsyncWrite};
use actix_rt::net::TcpStream;
use actix_service::{
fn_factory, fn_service, pipeline_factory, IntoServiceFactory, Service,
ServiceFactory,
};
use bytes::Bytes;
use futures::future::{ok, FutureResult};
use futures::{try_ready, Async, Future, IntoFuture, Poll, Stream};
use h2::server::{self, Connection, Handshake};
use h2::RecvStream;
use futures_core::ready;
use futures_util::future::ok;
use h2::server::{self, Handshake};
use log::error;
use crate::body::MessageBody;
use crate::config::{KeepAlive, ServiceConfig};
use crate::error::{DispatchError, Error, ParseError, ResponseError};
use crate::payload::Payload;
use crate::cloneable::CloneableService;
use crate::config::ServiceConfig;
use crate::error::{DispatchError, Error};
use crate::helpers::DataFactory;
use crate::request::Request;
use crate::response::Response;
use super::dispatcher::Dispatcher;
/// `NewService` implementation for HTTP2 transport
pub struct H2Service<T, P, S, B> {
/// `ServiceFactory` implementation for HTTP2 transport
pub struct H2Service<T, S, B> {
srv: S,
cfg: ServiceConfig,
_t: PhantomData<(T, P, B)>,
on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
_t: PhantomData<(T, B)>,
}
impl<T, P, S, B> H2Service<T, P, S, B>
impl<T, S, B> H2Service<T, S, B>
where
S: NewService<SrvConfig, Request = Request>,
S::Service: 'static,
S::Error: Debug + 'static,
S::Response: Into<Response<B>>,
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
/// Create new `HttpService` instance.
pub fn new<F: IntoNewService<S, SrvConfig>>(service: F) -> Self {
let cfg = ServiceConfig::new(KeepAlive::Timeout(5), 5000, 0);
H2Service {
cfg,
srv: service.into_new_service(),
_t: PhantomData,
}
}
/// Create new `HttpService` instance with config.
pub fn with_config<F: IntoNewService<S, SrvConfig>>(
pub(crate) fn with_config<F: IntoServiceFactory<S>>(
cfg: ServiceConfig,
service: F,
) -> Self {
H2Service {
cfg,
srv: service.into_new_service(),
on_connect: None,
srv: service.into_factory(),
_t: PhantomData,
}
}
/// Set on connect callback.
pub(crate) fn on_connect(
mut self,
f: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
) -> Self {
self.on_connect = f;
self
}
}
impl<S, B> H2Service<TcpStream, S, B>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
/// Create simple tcp based service
pub fn tcp(
self,
) -> impl ServiceFactory<
Config = (),
Request = TcpStream,
Response = (),
Error = DispatchError,
InitError = S::InitError,
> {
pipeline_factory(fn_factory(|| async {
Ok::<_, S::InitError>(fn_service(|io: TcpStream| {
let peer_addr = io.peer_addr().ok();
ok::<_, DispatchError>((io, peer_addr))
}))
}))
.and_then(self)
}
}
#[cfg(feature = "openssl")]
mod openssl {
use actix_service::{fn_factory, fn_service};
use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream};
use actix_tls::{openssl::HandshakeError, TlsError};
use super::*;
impl<S, B> H2Service<SslStream<TcpStream>, S, B>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
/// Create ssl based service
pub fn openssl(
self,
acceptor: SslAcceptor,
) -> impl ServiceFactory<
Config = (),
Request = TcpStream,
Response = (),
Error = TlsError<HandshakeError<TcpStream>, DispatchError>,
InitError = S::InitError,
> {
pipeline_factory(
Acceptor::new(acceptor)
.map_err(TlsError::Tls)
.map_init_err(|_| panic!()),
)
.and_then(fn_factory(|| {
ok::<_, S::InitError>(fn_service(|io: SslStream<TcpStream>| {
let peer_addr = io.get_ref().peer_addr().ok();
ok((io, peer_addr))
}))
}))
.and_then(self.map_err(TlsError::Service))
}
}
}
impl<T, P, S, B> NewService<SrvConfig> for H2Service<T, P, S, B>
#[cfg(feature = "rustls")]
mod rustls {
use super::*;
use actix_tls::rustls::{Acceptor, ServerConfig, TlsStream};
use actix_tls::TlsError;
use std::io;
impl<S, B> H2Service<TlsStream<TcpStream>, S, B>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
/// Create rustls based service
pub fn rustls(
self,
mut config: ServerConfig,
) -> impl ServiceFactory<
Config = (),
Request = TcpStream,
Response = (),
Error = TlsError<io::Error, DispatchError>,
InitError = S::InitError,
> {
let protos = vec!["h2".to_string().into()];
config.set_protocols(&protos);
pipeline_factory(
Acceptor::new(config)
.map_err(TlsError::Tls)
.map_init_err(|_| panic!()),
)
.and_then(fn_factory(|| {
ok::<_, S::InitError>(fn_service(|io: TlsStream<TcpStream>| {
let peer_addr = io.get_ref().0.peer_addr().ok();
ok((io, peer_addr))
}))
}))
.and_then(self.map_err(TlsError::Service))
}
}
}
impl<T, S, B> ServiceFactory for H2Service<T, S, B>
where
T: AsyncRead + AsyncWrite,
S: NewService<SrvConfig, Request = Request>,
S::Service: 'static,
S::Error: Debug,
S::Response: Into<Response<B>>,
T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
type Request = Io<T, P>;
type Config = ();
type Request = (T, Option<net::SocketAddr>);
type Response = ();
type Error = DispatchError;
type InitError = S::InitError;
type Service = H2ServiceHandler<T, P, S::Service, B>;
type Future = H2ServiceResponse<T, P, S, B>;
type Service = H2ServiceHandler<T, S::Service, B>;
type Future = H2ServiceResponse<T, S, B>;
fn new_service(&self, cfg: &SrvConfig) -> Self::Future {
fn new_service(&self, _: ()) -> Self::Future {
H2ServiceResponse {
fut: self.srv.new_service(cfg).into_future(),
fut: self.srv.new_service(()),
cfg: Some(self.cfg.clone()),
on_connect: self.on_connect.clone(),
_t: PhantomData,
}
}
}
#[doc(hidden)]
pub struct H2ServiceResponse<T, P, S: NewService<SrvConfig, Request = Request>, B> {
fut: <S::Future as IntoFuture>::Future,
#[pin_project::pin_project]
pub struct H2ServiceResponse<T, S: ServiceFactory, B> {
#[pin]
fut: S::Future,
cfg: Option<ServiceConfig>,
_t: PhantomData<(T, P, B)>,
on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
_t: PhantomData<(T, B)>,
}
impl<T, P, S, B> Future for H2ServiceResponse<T, P, S, B>
impl<T, S, B> Future for H2ServiceResponse<T, S, B>
where
T: AsyncRead + AsyncWrite,
S: NewService<SrvConfig, Request = Request>,
S::Service: 'static,
S::Response: Into<Response<B>>,
S::Error: Debug,
T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
type Item = H2ServiceHandler<T, P, S::Service, B>;
type Error = S::InitError;
type Output = Result<H2ServiceHandler<T, S::Service, B>, S::InitError>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let service = try_ready!(self.fut.poll());
Ok(Async::Ready(H2ServiceHandler::new(
self.cfg.take().unwrap(),
service,
)))
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project();
Poll::Ready(ready!(this.fut.poll(cx)).map(|service| {
let this = self.as_mut().project();
H2ServiceHandler::new(
this.cfg.take().unwrap(),
this.on_connect.clone(),
service,
)
}))
}
}
/// `Service` implementation for http/2 transport
pub struct H2ServiceHandler<T, P, S: 'static, B> {
pub struct H2ServiceHandler<T, S: Service, B> {
srv: CloneableService<S>,
cfg: ServiceConfig,
_t: PhantomData<(T, P, B)>,
on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
_t: PhantomData<(T, B)>,
}
impl<T, P, S, B> H2ServiceHandler<T, P, S, B>
impl<T, S, B> H2ServiceHandler<T, S, B>
where
S: Service<Request = Request> + 'static,
S::Error: Debug,
S::Response: Into<Response<B>>,
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
{
fn new(cfg: ServiceConfig, srv: S) -> H2ServiceHandler<T, P, S, B> {
fn new(
cfg: ServiceConfig,
on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
srv: S,
) -> H2ServiceHandler<T, S, B> {
H2ServiceHandler {
cfg,
on_connect,
srv: CloneableService::new(srv),
_t: PhantomData,
}
}
}
impl<T, P, S, B> Service for H2ServiceHandler<T, P, S, B>
impl<T, S, B> Service for H2ServiceHandler<T, S, B>
where
T: AsyncRead + AsyncWrite,
S: Service<Request = Request> + 'static,
S::Error: Debug,
S::Response: Into<Response<B>>,
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
{
type Request = Io<T, P>;
type Request = (T, Option<net::SocketAddr>);
type Response = ();
type Error = DispatchError;
type Future = H2ServiceHandlerResponse<T, S, B>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.srv.poll_ready().map_err(|e| {
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.srv.poll_ready(cx).map_err(|e| {
let e = e.into();
error!("Service readiness error: {:?}", e);
DispatchError::Service
DispatchError::Service(e)
})
}
fn call(&mut self, req: Self::Request) -> Self::Future {
fn call(&mut self, (io, addr): Self::Request) -> Self::Future {
let on_connect = if let Some(ref on_connect) = self.on_connect {
Some(on_connect(&io))
} else {
None
};
H2ServiceHandlerResponse {
state: State::Handshake(
Some(self.srv.clone()),
Some(self.cfg.clone()),
server::handshake(req.into_parts().0),
addr,
on_connect,
server::handshake(io),
),
}
}
}
enum State<
T: AsyncRead + AsyncWrite,
S: Service<Request = Request> + 'static,
B: MessageBody,
> {
enum State<T, S: Service<Request = Request>, B: MessageBody>
where
T: AsyncRead + AsyncWrite + Unpin,
S::Future: 'static,
{
Incoming(Dispatcher<T, S, B>),
Handshake(
Option<CloneableService<S>>,
Option<ServiceConfig>,
Option<net::SocketAddr>,
Option<Box<dyn DataFactory>>,
Handshake<T, Bytes>,
),
}
pub struct H2ServiceHandlerResponse<T, S, B>
where
T: AsyncRead + AsyncWrite,
S: Service<Request = Request> + 'static,
S::Error: Debug,
S::Response: Into<Response<B>>,
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
{
state: State<T, S, B>,
@ -194,36 +343,42 @@ where
impl<T, S, B> Future for H2ServiceHandlerResponse<T, S, B>
where
T: AsyncRead + AsyncWrite,
S: Service<Request = Request> + 'static,
S::Error: Debug,
S::Response: Into<Response<B>>,
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody,
{
type Item = ();
type Error = DispatchError;
type Output = Result<(), DispatchError>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.state {
State::Incoming(ref mut disp) => disp.poll(),
State::Handshake(ref mut srv, ref mut config, ref mut handshake) => {
match handshake.poll() {
Ok(Async::Ready(conn)) => {
self.state = State::Incoming(Dispatcher::new(
srv.take().unwrap(),
conn,
config.take().unwrap(),
None,
));
self.poll()
}
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(err) => {
trace!("H2 handshake error: {}", err);
return Err(err.into());
}
State::Incoming(ref mut disp) => Pin::new(disp).poll(cx),
State::Handshake(
ref mut srv,
ref mut config,
ref peer_addr,
ref mut on_connect,
ref mut handshake,
) => match Pin::new(handshake).poll(cx) {
Poll::Ready(Ok(conn)) => {
self.state = State::Incoming(Dispatcher::new(
srv.take().unwrap(),
conn,
on_connect.take(),
config.take().unwrap(),
None,
*peer_addr,
));
self.poll(cx)
}
}
Poll::Ready(Err(err)) => {
trace!("H2 handshake error: {}", err);
Poll::Ready(Err(err.into()))
}
Poll::Pending => Poll::Pending,
},
}
}
}
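
For reference, the change above is the core of the futures 0.1 to std::future migration repeated throughout this compare: `fn poll(&mut self) -> Poll<Self::Item, Self::Error>` with `Async::Ready`/`Async::NotReady` and `try_ready!` becomes `fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output>` with `Poll::Ready`/`Poll::Pending` and `ready!`. A minimal sketch of the new trait shape using only the standard library (not actix code; the type name is made up for illustration):

use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

// Ready on its second poll; written against std::future::Future,
// which the rewritten service/handler futures above now implement.
struct PendingOnce {
    polled: bool,
}

impl Future for PendingOnce {
    // A single `Output` type replaces the old `Item`/`Error` pair;
    // fallible futures use `Output = Result<T, E>` instead.
    type Output = ();

    fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        if self.polled {
            Poll::Ready(())
        } else {
            self.polled = true;
            // Arrange to be polled again before returning Pending
            // (the futures 0.1 counterpart was returning `Ok(Async::NotReady)`
            // after notifying the current task).
            cx.waker().wake_by_ref();
            Poll::Pending
        }
    }
}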


@ -63,7 +63,7 @@ header! {
(AcceptCharset, ACCEPT_CHARSET) => (QualityItem<Charset>)+
test_accept_charset {
/// Test case from RFC
// Test case from RFC
test_header!(test1, vec![b"iso-8859-5, unicode-1-1;q=0.8"]);
}
}


@ -1,5 +1,5 @@
use http::Method;
use http::header;
use http::Method;
header! {
/// `Allow` header, defined in [RFC7231](http://tools.ietf.org/html/rfc7231#section-7.4.1)


@ -64,7 +64,7 @@ impl Header for CacheControl {
where
T: crate::HttpMessage,
{
let directives = from_comma_delimited(msg.headers().get_all(Self::name()))?;
let directives = from_comma_delimited(msg.headers().get_all(&Self::name()))?;
if !directives.is_empty() {
Ok(CacheControl(directives))
} else {
@ -74,18 +74,18 @@ impl Header for CacheControl {
}
impl fmt::Display for CacheControl {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt_comma_delimited(f, &self[..])
}
}
impl IntoHeaderValue for CacheControl {
type Error = header::InvalidHeaderValueBytes;
type Error = header::InvalidHeaderValue;
fn try_into(self) -> Result<header::HeaderValue, Self::Error> {
let mut writer = Writer::new();
let _ = write!(&mut writer, "{}", self);
header::HeaderValue::from_shared(writer.take())
header::HeaderValue::from_maybe_shared(writer.take())
}
}
@ -126,7 +126,7 @@ pub enum CacheDirective {
}
impl fmt::Display for CacheDirective {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use self::CacheDirective::*;
fmt::Display::fmt(
match *self {


@ -70,11 +70,17 @@ impl<'a> From<&'a str> for DispositionType {
/// assert_eq!(param.as_filename().unwrap(), "sample.txt");
/// ```
#[derive(Clone, Debug, PartialEq)]
#[allow(clippy::large_enum_variant)]
pub enum DispositionParam {
/// For [`DispositionType::FormData`] (i.e. *multipart/form-data*), the name of an field from
/// the form.
Name(String),
/// A plain file name.
///
/// It is [not supposed](https://tools.ietf.org/html/rfc6266#appendix-D) to contain any
/// non-ASCII characters when used in a *Content-Disposition* HTTP response header, where
/// [`FilenameExt`](DispositionParam::FilenameExt) with charset UTF-8 may be used instead
/// in case there are Unicode characters in file names.
Filename(String),
/// An extended file name. It must not exist for `ContentType::Formdata` according to
/// [RFC7578 Section 4.2](https://tools.ietf.org/html/rfc7578#section-4.2).
@ -84,40 +90,40 @@ pub enum DispositionParam {
/// [RFC6266](https://tools.ietf.org/html/rfc6266) as *token "=" value*. Recipients should
/// ignore unrecognizable parameters.
Unknown(String, String),
/// An unrecognized extended paramater as defined in
/// An unrecognized extended parameter as defined in
/// [RFC5987](https://tools.ietf.org/html/rfc5987) as *ext-parameter*, in
/// [RFC6266](https://tools.ietf.org/html/rfc6266) as *ext-token "=" ext-value*. The single
/// trailling asterisk is not included. Recipients should ignore unrecognizable parameters.
/// trailing asterisk is not included. Recipients should ignore unrecognizable parameters.
UnknownExt(String, ExtendedValue),
}
impl DispositionParam {
/// Returns `true` if the paramater is [`Name`](DispositionParam::Name).
/// Returns `true` if the parameter is [`Name`](DispositionParam::Name).
#[inline]
pub fn is_name(&self) -> bool {
self.as_name().is_some()
}
/// Returns `true` if the paramater is [`Filename`](DispositionParam::Filename).
/// Returns `true` if the parameter is [`Filename`](DispositionParam::Filename).
#[inline]
pub fn is_filename(&self) -> bool {
self.as_filename().is_some()
}
/// Returns `true` if the paramater is [`FilenameExt`](DispositionParam::FilenameExt).
/// Returns `true` if the parameter is [`FilenameExt`](DispositionParam::FilenameExt).
#[inline]
pub fn is_filename_ext(&self) -> bool {
self.as_filename_ext().is_some()
}
/// Returns `true` if the paramater is [`Unknown`](DispositionParam::Unknown) and the `name`
/// Returns `true` if the parameter is [`Unknown`](DispositionParam::Unknown) and the `name`
#[inline]
/// matches.
pub fn is_unknown<T: AsRef<str>>(&self, name: T) -> bool {
self.as_unknown(name).is_some()
}
/// Returns `true` if the paramater is [`UnknownExt`](DispositionParam::UnknownExt) and the
/// Returns `true` if the parameter is [`UnknownExt`](DispositionParam::UnknownExt) and the
/// `name` matches.
#[inline]
pub fn is_unknown_ext<T: AsRef<str>>(&self, name: T) -> bool {
@ -219,7 +225,16 @@ impl DispositionParam {
/// ext-token = <the characters in token, followed by "*">
/// ```
///
/// **Note**: filename* [must not](https://tools.ietf.org/html/rfc7578#section-4.2) be used within
/// # Note
///
/// filename is [not supposed](https://tools.ietf.org/html/rfc6266#appendix-D) to contain any
/// non-ASCII characters when used in a *Content-Disposition* HTTP response header, where
/// filename* with charset UTF-8 may be used instead in case there are Unicode characters in file
/// names.
/// filename is [acceptable](https://tools.ietf.org/html/rfc7578#section-4.2) to be UTF-8 encoded
/// directly in a *Content-Disposition* header for *multipart/form-data*, though.
///
/// filename* [must not](https://tools.ietf.org/html/rfc7578#section-4.2) be used within
/// *multipart/form-data*.
///
/// # Example
@ -250,13 +265,29 @@ impl DispositionParam {
/// };
/// assert_eq!(cd2.get_name(), Some("file")); // field name
/// assert_eq!(cd2.get_filename(), Some("bill.odt"));
///
/// // HTTP response header with Unicode characters in file names
/// let cd3 = ContentDisposition {
/// disposition: DispositionType::Attachment,
/// parameters: vec![
/// DispositionParam::FilenameExt(ExtendedValue {
/// charset: Charset::Ext(String::from("UTF-8")),
/// language_tag: None,
/// value: String::from("\u{1f600}.svg").into_bytes(),
/// }),
/// // fallback for better compatibility
/// DispositionParam::Filename(String::from("Grinning-Face-Emoji.svg"))
/// ],
/// };
/// assert_eq!(cd3.get_filename_ext().map(|ev| ev.value.as_ref()),
/// Some("\u{1f600}.svg".as_bytes()));
/// ```
///
/// # WARN
/// # Security Note
///
/// If "filename" parameter is supplied, do not use the file name blindly, check and possibly
/// change to match local file system conventions if applicable, and do not use directory path
/// information that may be present. See [RFC2183](https://tools.ietf.org/html/rfc2183#section-2.3)
/// .
/// information that may be present. See [RFC2183](https://tools.ietf.org/html/rfc2183#section-2.3).
#[derive(Clone, Debug, PartialEq)]
pub struct ContentDisposition {
/// The disposition type
@ -332,15 +363,17 @@ impl ContentDisposition {
// token: won't contains semicolon according to RFC 2616 Section 2.2
let (token, new_left) = split_once_and_trim(left, ';');
left = new_left;
if token.is_empty() {
// quoted-string can be empty, but token cannot be empty
return Err(crate::error::ParseError::Header);
}
token.to_owned()
};
if value.is_empty() {
return Err(crate::error::ParseError::Header);
}
let param = if param_name.eq_ignore_ascii_case("name") {
DispositionParam::Name(value)
} else if param_name.eq_ignore_ascii_case("filename") {
// See also comments in test_from_raw_unnecessary_percent_decode.
DispositionParam::Filename(value)
} else {
DispositionParam::Unknown(param_name.to_owned(), value)
@ -354,26 +387,17 @@ impl ContentDisposition {
/// Returns `true` if it is [`Inline`](DispositionType::Inline).
pub fn is_inline(&self) -> bool {
match self.disposition {
DispositionType::Inline => true,
_ => false,
}
matches!(self.disposition, DispositionType::Inline)
}
/// Returns `true` if it is [`Attachment`](DispositionType::Attachment).
pub fn is_attachment(&self) -> bool {
match self.disposition {
DispositionType::Attachment => true,
_ => false,
}
matches!(self.disposition, DispositionType::Attachment)
}
/// Returns `true` if it is [`FormData`](DispositionType::FormData).
pub fn is_form_data(&self) -> bool {
match self.disposition {
DispositionType::FormData => true,
_ => false,
}
matches!(self.disposition, DispositionType::FormData)
}
/// Returns `true` if it is [`Ext`](DispositionType::Ext) and the `disp_type` matches.
@ -390,7 +414,7 @@ impl ContentDisposition {
/// Return the value of *name* if exists.
pub fn get_name(&self) -> Option<&str> {
self.parameters.iter().filter_map(|p| p.as_name()).nth(0)
self.parameters.iter().filter_map(|p| p.as_name()).next()
}
/// Return the value of *filename* if exists.
@ -398,7 +422,7 @@ impl ContentDisposition {
self.parameters
.iter()
.filter_map(|p| p.as_filename())
.nth(0)
.next()
}
/// Return the value of *filename\** if exists.
@ -406,7 +430,7 @@ impl ContentDisposition {
self.parameters
.iter()
.filter_map(|p| p.as_filename_ext())
.nth(0)
.next()
}
/// Return the value of the parameter which the `name` matches.
@ -415,7 +439,7 @@ impl ContentDisposition {
self.parameters
.iter()
.filter_map(|p| p.as_unknown(name))
.nth(0)
.next()
}
/// Return the value of the extended parameter which the `name` matches.
@ -424,17 +448,17 @@ impl ContentDisposition {
self.parameters
.iter()
.filter_map(|p| p.as_unknown_ext(name))
.nth(0)
.next()
}
}
impl IntoHeaderValue for ContentDisposition {
type Error = header::InvalidHeaderValueBytes;
type Error = header::InvalidHeaderValue;
fn try_into(self) -> Result<header::HeaderValue, Self::Error> {
let mut writer = Writer::new();
let _ = write!(&mut writer, "{}", self);
header::HeaderValue::from_shared(writer.take())
header::HeaderValue::from_maybe_shared(writer.take())
}
}
@ -444,7 +468,7 @@ impl Header for ContentDisposition {
}
fn parse<T: crate::HttpMessage>(msg: &T) -> Result<Self, crate::error::ParseError> {
if let Some(h) = msg.headers().get(Self::name()) {
if let Some(h) = msg.headers().get(&Self::name()) {
Self::from_raw(&h)
} else {
Err(crate::error::ParseError::Header)
@ -453,7 +477,7 @@ impl Header for ContentDisposition {
}
impl fmt::Display for DispositionType {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
DispositionType::Inline => write!(f, "inline"),
DispositionType::Attachment => write!(f, "attachment"),
@ -464,12 +488,41 @@ impl fmt::Display for DispositionType {
}
impl fmt::Display for DispositionParam {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// All ASCII control charaters (0-30, 127) excepting horizontal tab, double quote, and
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
// All ASCII control characters (0-30, 127) including horizontal tab, double quote, and
// backslash should be escaped in quoted-string (i.e. "foobar").
// Ref: RFC6266 S4.1 -> RFC2616 S2.2; RFC 7578 S4.2 -> RFC2183 S2 -> ... .
// Ref: RFC6266 S4.1 -> RFC2616 S3.6
// filename-parm = "filename" "=" value
// value = token | quoted-string
// quoted-string = ( <"> *(qdtext | quoted-pair ) <"> )
// qdtext = <any TEXT except <">>
// quoted-pair = "\" CHAR
// TEXT = <any OCTET except CTLs,
// but including LWS>
// LWS = [CRLF] 1*( SP | HT )
// OCTET = <any 8-bit sequence of data>
// CHAR = <any US-ASCII character (octets 0 - 127)>
// CTL = <any US-ASCII control character
// (octets 0 - 31) and DEL (127)>
//
// Ref: RFC7578 S4.2 -> RFC2183 S2 -> RFC2045 S5.1
// parameter := attribute "=" value
// attribute := token
// ; Matching of attributes
// ; is ALWAYS case-insensitive.
// value := token / quoted-string
// token := 1*<any (US-ASCII) CHAR except SPACE, CTLs,
// or tspecials>
// tspecials := "(" / ")" / "<" / ">" / "@" /
// "," / ";" / ":" / "\" / <">
// "/" / "[" / "]" / "?" / "="
// ; Must be in quoted-string,
// ; to use within parameter values
//
//
// See also comments in test_from_raw_unnecessary_percent_decode.
lazy_static! {
static ref RE: Regex = Regex::new("[\x01-\x08\x10\x1F\x7F\"\\\\]").unwrap();
static ref RE: Regex = Regex::new("[\x00-\x08\x10-\x1F\x7F\"\\\\]").unwrap();
}
match self {
DispositionParam::Name(ref value) => write!(f, "name={}", value),
@ -493,7 +546,7 @@ impl fmt::Display for DispositionParam {
}
impl fmt::Display for ContentDisposition {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", self.disposition)?;
self.parameters
.iter()
@ -614,7 +667,7 @@ mod tests {
fn test_from_raw_unordered() {
let a = HeaderValue::from_static(
"form-data; dummy=3; filename=\"sample.png\" ; name=upload;",
// Actually, a trailling semolocon is not compliant. But it is fine to accept.
// Actually, a trailing semicolon is not compliant. But it is fine to accept.
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
@ -706,9 +759,8 @@ mod tests {
Mainstream browsers like Firefox (gecko) and Chrome use UTF-8 directly as above.
(And now, only UTF-8 is handled by this implementation.)
*/
let a =
HeaderValue::from_str("form-data; name=upload; filename=\"文件.webp\"")
.unwrap();
let a = HeaderValue::from_str("form-data; name=upload; filename=\"文件.webp\"")
.unwrap();
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
@ -719,8 +771,10 @@ mod tests {
};
assert_eq!(a, b);
let a =
HeaderValue::from_str("form-data; name=upload; filename=\"余固知謇謇之為患兮,忍而不能舍也.pptx\"").unwrap();
let a = HeaderValue::from_str(
"form-data; name=upload; filename=\"余固知謇謇之為患兮,忍而不能舍也.pptx\"",
)
.unwrap();
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
disposition: DispositionType::FormData,
@ -770,9 +824,19 @@ mod tests {
}
#[test]
fn test_from_raw_uncessary_percent_decode() {
fn test_from_raw_unnecessary_percent_decode() {
// In fact, RFC7578 (multipart/form-data) Section 2 and 4.2 suggests that filename with
// non-ASCII characters MAY be percent-encoded.
// On the contrary, RFC6266 or other RFCs related to Content-Disposition response header
// do not mention such percent-encoding.
// So, it appears to be undecidable whether to percent-decode or not without
// knowing the usage scenario (multipart/form-data v.s. HTTP response header) and
// inevitable to unnecessarily percent-decode filename with %XX in the former scenario.
// Fortunately, it seems that almost all mainstream browsers just send UTF-8 encoded file
// names in quoted-string format (tested on Edge, IE11, Chrome and Firefox) without
// percent-encoding. So we do not bother to attempt to percent-decode.
let a = HeaderValue::from_static(
"form-data; name=photo; filename=\"%74%65%73%74%2e%70%6e%67\"", // Should not be decoded!
"form-data; name=photo; filename=\"%74%65%73%74%2e%70%6e%67\"",
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
@ -808,6 +872,13 @@ mod tests {
let a = HeaderValue::from_static("inline; filename= ");
assert!(ContentDisposition::from_raw(&a).is_err());
let a = HeaderValue::from_static("inline; filename=\"\"");
assert!(ContentDisposition::from_raw(&a)
.expect("parse cd")
.get_filename()
.expect("filename")
.is_empty());
}
#[test]


@ -3,7 +3,7 @@ use std::str::FromStr;
use crate::error::ParseError;
use crate::header::{
HeaderValue, IntoHeaderValue, InvalidHeaderValueBytes, Writer, CONTENT_RANGE,
HeaderValue, IntoHeaderValue, InvalidHeaderValue, Writer, CONTENT_RANGE,
};
header! {
@ -166,7 +166,7 @@ impl FromStr for ContentRangeSpec {
}
impl Display for ContentRangeSpec {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
ContentRangeSpec::Bytes {
range,
@ -198,11 +198,11 @@ impl Display for ContentRangeSpec {
}
impl IntoHeaderValue for ContentRangeSpec {
type Error = InvalidHeaderValueBytes;
type Error = InvalidHeaderValue;
fn try_into(self) -> Result<HeaderValue, Self::Error> {
let mut writer = Writer::new();
let _ = write!(&mut writer, "{}", self);
HeaderValue::from_shared(writer.take())
HeaderValue::from_maybe_shared(writer.take())
}
}


@ -3,7 +3,7 @@ use std::fmt::{self, Display, Write};
use crate::error::ParseError;
use crate::header::{
self, from_one_raw_str, EntityTag, Header, HeaderName, HeaderValue, HttpDate,
IntoHeaderValue, InvalidHeaderValueBytes, Writer,
IntoHeaderValue, InvalidHeaderValue, Writer,
};
use crate::httpmessage::HttpMessage;
@ -73,12 +73,12 @@ impl Header for IfRange {
T: HttpMessage,
{
let etag: Result<EntityTag, _> =
from_one_raw_str(msg.headers().get(header::IF_RANGE));
from_one_raw_str(msg.headers().get(&header::IF_RANGE));
if let Ok(etag) = etag {
return Ok(IfRange::EntityTag(etag));
}
let date: Result<HttpDate, _> =
from_one_raw_str(msg.headers().get(header::IF_RANGE));
from_one_raw_str(msg.headers().get(&header::IF_RANGE));
if let Ok(date) = date {
return Ok(IfRange::Date(date));
}
@ -87,7 +87,7 @@ impl Header for IfRange {
}
impl Display for IfRange {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
IfRange::EntityTag(ref x) => Display::fmt(x, f),
IfRange::Date(ref x) => Display::fmt(x, f),
@ -96,12 +96,12 @@ impl Display for IfRange {
}
impl IntoHeaderValue for IfRange {
type Error = InvalidHeaderValueBytes;
type Error = InvalidHeaderValue;
fn try_into(self) -> Result<HeaderValue, Self::Error> {
let mut writer = Writer::new();
let _ = write!(&mut writer, "{}", self);
HeaderValue::from_shared(writer.take())
HeaderValue::from_maybe_shared(writer.take())
}
}


@ -9,11 +9,13 @@
pub use self::accept_charset::AcceptCharset;
//pub use self::accept_encoding::AcceptEncoding;
pub use self::accept_language::AcceptLanguage;
pub use self::accept::Accept;
pub use self::accept_language::AcceptLanguage;
pub use self::allow::Allow;
pub use self::cache_control::{CacheControl, CacheDirective};
pub use self::content_disposition::{ContentDisposition, DispositionType, DispositionParam};
pub use self::content_disposition::{
ContentDisposition, DispositionParam, DispositionType,
};
pub use self::content_language::ContentLanguage;
pub use self::content_range::{ContentRange, ContentRangeSpec};
pub use self::content_type::ContentType;
@ -47,7 +49,7 @@ macro_rules! __hyper__deref {
&mut self.0
}
}
}
};
}
#[doc(hidden)]
@ -74,8 +76,8 @@ macro_rules! test_header {
($id:ident, $raw:expr) => {
#[test]
fn $id() {
use $crate::test;
use super::*;
use $crate::test;
let raw = $raw;
let a: Vec<Vec<u8>> = raw.iter().map(|x| x.to_vec()).collect();
@ -118,7 +120,7 @@ macro_rules! test_header {
// Test formatting
if typed.is_some() {
let raw = &($raw)[..];
let mut iter = raw.iter().map(|b|str::from_utf8(&b[..]).unwrap());
let mut iter = raw.iter().map(|b| str::from_utf8(&b[..]).unwrap());
let mut joined = String::new();
joined.push_str(iter.next().unwrap());
for s in iter {
@ -128,7 +130,7 @@ macro_rules! test_header {
assert_eq!(format!("{}", typed.unwrap()), joined);
}
}
}
};
}
#[macro_export]
@ -159,18 +161,18 @@ macro_rules! header {
}
impl std::fmt::Display for $id {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter) -> ::std::fmt::Result {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> ::std::fmt::Result {
$crate::http::header::fmt_comma_delimited(f, &self.0[..])
}
}
impl $crate::http::header::IntoHeaderValue for $id {
type Error = $crate::http::header::InvalidHeaderValueBytes;
type Error = $crate::http::header::InvalidHeaderValue;
fn try_into(self) -> Result<$crate::http::header::HeaderValue, Self::Error> {
use std::fmt::Write;
let mut writer = $crate::http::header::Writer::new();
let _ = write!(&mut writer, "{}", self);
$crate::http::header::HeaderValue::from_shared(writer.take())
$crate::http::header::HeaderValue::from_maybe_shared(writer.take())
}
}
};
@ -195,18 +197,18 @@ macro_rules! header {
}
impl std::fmt::Display for $id {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
$crate::http::header::fmt_comma_delimited(f, &self.0[..])
}
}
impl $crate::http::header::IntoHeaderValue for $id {
type Error = $crate::http::header::InvalidHeaderValueBytes;
type Error = $crate::http::header::InvalidHeaderValue;
fn try_into(self) -> Result<$crate::http::header::HeaderValue, Self::Error> {
use std::fmt::Write;
let mut writer = $crate::http::header::Writer::new();
let _ = write!(&mut writer, "{}", self);
$crate::http::header::HeaderValue::from_shared(writer.take())
$crate::http::header::HeaderValue::from_maybe_shared(writer.take())
}
}
};
@ -231,12 +233,12 @@ macro_rules! header {
}
impl std::fmt::Display for $id {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(&self.0, f)
}
}
impl $crate::http::header::IntoHeaderValue for $id {
type Error = $crate::http::header::InvalidHeaderValueBytes;
type Error = $crate::http::header::InvalidHeaderValue;
fn try_into(self) -> Result<$crate::http::header::HeaderValue, Self::Error> {
self.0.try_into()
@ -276,7 +278,7 @@ macro_rules! header {
}
impl std::fmt::Display for $id {
#[inline]
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match *self {
$id::Any => f.write_str("*"),
$id::Items(ref fields) => $crate::http::header::fmt_comma_delimited(
@ -285,13 +287,13 @@ macro_rules! header {
}
}
impl $crate::http::header::IntoHeaderValue for $id {
type Error = $crate::http::header::InvalidHeaderValueBytes;
type Error = $crate::http::header::InvalidHeaderValue;
fn try_into(self) -> Result<$crate::http::header::HeaderValue, Self::Error> {
use std::fmt::Write;
let mut writer = $crate::http::header::Writer::new();
let _ = write!(&mut writer, "{}", self);
$crate::http::header::HeaderValue::from_shared(writer.take())
$crate::http::header::HeaderValue::from_maybe_shared(writer.take())
}
}
};
@ -330,11 +332,10 @@ macro_rules! header {
};
}
mod accept_charset;
//mod accept_encoding;
mod accept_language;
mod accept;
mod accept_language;
mod allow;
mod cache_control;
mod content_disposition;


@ -7,7 +7,7 @@ use header::{Header, Raw};
/// `Range` header, defined in [RFC7233](https://tools.ietf.org/html/rfc7233#section-3.1)
///
/// The "Range" header field on a GET request modifies the method
/// semantics to request transfer of only one or more subranges of the
/// semantics to request transfer of only one or more sub-ranges of the
/// selected representation data, rather than the entire selected
/// representation data.
///
@ -183,13 +183,13 @@ impl fmt::Display for Range {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Range::Bytes(ref ranges) => {
try!(write!(f, "bytes="));
write!(f, "bytes=")?;
for (i, range) in ranges.iter().enumerate() {
if i != 0 {
try!(f.write_str(","));
f.write_str(",")?;
}
try!(Display::fmt(range, f));
Display::fmt(range, f)?;
}
Ok(())
}
@ -214,9 +214,9 @@ impl FromStr for Range {
}
Ok(Range::Bytes(ranges))
}
(Some(unit), Some(range_str)) if unit != "" && range_str != "" => Ok(
Range::Unregistered(unit.to_owned(), range_str.to_owned()),
),
(Some(unit), Some(range_str)) if unit != "" && range_str != "" => {
Ok(Range::Unregistered(unit.to_owned(), range_str.to_owned()))
}
_ => Err(::Error::Header),
}
}
@ -229,7 +229,8 @@ impl FromStr for ByteRangeSpec {
let mut parts = s.splitn(2, '-');
match (parts.next(), parts.next()) {
(Some(""), Some(end)) => end.parse()
(Some(""), Some(end)) => end
.parse()
.or(Err(::Error::Header))
.map(ByteRangeSpec::Last),
(Some(start), Some("")) => start
@ -272,163 +273,138 @@ impl Header for Range {
}
}
#[test]
fn test_parse_bytes_range_valid() {
let r: Range = Header::parse_header(&"bytes=1-100".into()).unwrap();
let r2: Range = Header::parse_header(&"bytes=1-100,-".into()).unwrap();
let r3 = Range::bytes(1, 100);
assert_eq!(r, r2);
assert_eq!(r2, r3);
#[cfg(test)]
mod tests {
use super::*;
let r: Range = Header::parse_header(&"bytes=1-100,200-".into()).unwrap();
let r2: Range =
Header::parse_header(&"bytes= 1-100 , 101-xxx, 200- ".into()).unwrap();
let r3 = Range::Bytes(vec![
ByteRangeSpec::FromTo(1, 100),
ByteRangeSpec::AllFrom(200),
]);
assert_eq!(r, r2);
assert_eq!(r2, r3);
#[test]
fn test_parse_bytes_range_valid() {
let r: Range = Header::parse_header(&"bytes=1-100".into()).unwrap();
let r2: Range = Header::parse_header(&"bytes=1-100,-".into()).unwrap();
let r3 = Range::bytes(1, 100);
assert_eq!(r, r2);
assert_eq!(r2, r3);
let r: Range = Header::parse_header(&"bytes=1-100,-100".into()).unwrap();
let r2: Range = Header::parse_header(&"bytes=1-100, ,,-100".into()).unwrap();
let r3 = Range::Bytes(vec![
ByteRangeSpec::FromTo(1, 100),
ByteRangeSpec::Last(100),
]);
assert_eq!(r, r2);
assert_eq!(r2, r3);
let r: Range = Header::parse_header(&"bytes=1-100,200-".into()).unwrap();
let r2: Range =
Header::parse_header(&"bytes= 1-100 , 101-xxx, 200- ".into()).unwrap();
let r3 = Range::Bytes(vec![
ByteRangeSpec::FromTo(1, 100),
ByteRangeSpec::AllFrom(200),
]);
assert_eq!(r, r2);
assert_eq!(r2, r3);
let r: Range = Header::parse_header(&"custom=1-100,-100".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned());
assert_eq!(r, r2);
}
#[test]
fn test_parse_unregistered_range_valid() {
let r: Range = Header::parse_header(&"custom=1-100,-100".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned());
assert_eq!(r, r2);
let r: Range = Header::parse_header(&"custom=abcd".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "abcd".to_owned());
assert_eq!(r, r2);
let r: Range = Header::parse_header(&"custom=xxx-yyy".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "xxx-yyy".to_owned());
assert_eq!(r, r2);
}
#[test]
fn test_parse_invalid() {
let r: ::Result<Range> = Header::parse_header(&"bytes=1-a,-".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=1-2-3".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"abc".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=1-100=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"custom=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"=1-100".into());
assert_eq!(r.ok(), None);
}
#[test]
fn test_fmt() {
use header::Headers;
let mut headers = Headers::new();
headers.set(Range::Bytes(vec![
ByteRangeSpec::FromTo(0, 1000),
ByteRangeSpec::AllFrom(2000),
]));
assert_eq!(&headers.to_string(), "Range: bytes=0-1000,2000-\r\n");
headers.clear();
headers.set(Range::Bytes(vec![]));
assert_eq!(&headers.to_string(), "Range: bytes=\r\n");
headers.clear();
headers.set(Range::Unregistered(
"custom".to_owned(),
"1-xxx".to_owned(),
));
assert_eq!(&headers.to_string(), "Range: custom=1-xxx\r\n");
}
#[test]
fn test_byte_range_spec_to_satisfiable_range() {
assert_eq!(
Some((0, 0)),
ByteRangeSpec::FromTo(0, 0).to_satisfiable_range(3)
);
assert_eq!(
Some((1, 2)),
ByteRangeSpec::FromTo(1, 2).to_satisfiable_range(3)
);
assert_eq!(
Some((1, 2)),
ByteRangeSpec::FromTo(1, 5).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::FromTo(3, 3).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::FromTo(2, 1).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::FromTo(0, 0).to_satisfiable_range(0)
);
assert_eq!(
Some((0, 2)),
ByteRangeSpec::AllFrom(0).to_satisfiable_range(3)
);
assert_eq!(
Some((2, 2)),
ByteRangeSpec::AllFrom(2).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::AllFrom(3).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::AllFrom(5).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::AllFrom(0).to_satisfiable_range(0)
);
assert_eq!(
Some((1, 2)),
ByteRangeSpec::Last(2).to_satisfiable_range(3)
);
assert_eq!(
Some((2, 2)),
ByteRangeSpec::Last(1).to_satisfiable_range(3)
);
assert_eq!(
Some((0, 2)),
ByteRangeSpec::Last(5).to_satisfiable_range(3)
);
assert_eq!(None, ByteRangeSpec::Last(0).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::Last(2).to_satisfiable_range(0));
let r: Range = Header::parse_header(&"bytes=1-100,-100".into()).unwrap();
let r2: Range = Header::parse_header(&"bytes=1-100, ,,-100".into()).unwrap();
let r3 = Range::Bytes(vec![
ByteRangeSpec::FromTo(1, 100),
ByteRangeSpec::Last(100),
]);
assert_eq!(r, r2);
assert_eq!(r2, r3);
let r: Range = Header::parse_header(&"custom=1-100,-100".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned());
assert_eq!(r, r2);
}
#[test]
fn test_parse_unregistered_range_valid() {
let r: Range = Header::parse_header(&"custom=1-100,-100".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned());
assert_eq!(r, r2);
let r: Range = Header::parse_header(&"custom=abcd".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "abcd".to_owned());
assert_eq!(r, r2);
let r: Range = Header::parse_header(&"custom=xxx-yyy".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "xxx-yyy".to_owned());
assert_eq!(r, r2);
}
#[test]
fn test_parse_invalid() {
let r: ::Result<Range> = Header::parse_header(&"bytes=1-a,-".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=1-2-3".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"abc".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=1-100=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"custom=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"=1-100".into());
assert_eq!(r.ok(), None);
}
#[test]
fn test_fmt() {
use header::Headers;
let mut headers = Headers::new();
headers.set(Range::Bytes(vec![
ByteRangeSpec::FromTo(0, 1000),
ByteRangeSpec::AllFrom(2000),
]));
assert_eq!(&headers.to_string(), "Range: bytes=0-1000,2000-\r\n");
headers.clear();
headers.set(Range::Bytes(vec![]));
assert_eq!(&headers.to_string(), "Range: bytes=\r\n");
headers.clear();
headers.set(Range::Unregistered("custom".to_owned(), "1-xxx".to_owned()));
assert_eq!(&headers.to_string(), "Range: custom=1-xxx\r\n");
}
#[test]
fn test_byte_range_spec_to_satisfiable_range() {
assert_eq!(
Some((0, 0)),
ByteRangeSpec::FromTo(0, 0).to_satisfiable_range(3)
);
assert_eq!(
Some((1, 2)),
ByteRangeSpec::FromTo(1, 2).to_satisfiable_range(3)
);
assert_eq!(
Some((1, 2)),
ByteRangeSpec::FromTo(1, 5).to_satisfiable_range(3)
);
assert_eq!(None, ByteRangeSpec::FromTo(3, 3).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::FromTo(2, 1).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::FromTo(0, 0).to_satisfiable_range(0));
assert_eq!(
Some((0, 2)),
ByteRangeSpec::AllFrom(0).to_satisfiable_range(3)
);
assert_eq!(
Some((2, 2)),
ByteRangeSpec::AllFrom(2).to_satisfiable_range(3)
);
assert_eq!(None, ByteRangeSpec::AllFrom(3).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::AllFrom(5).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::AllFrom(0).to_satisfiable_range(0));
assert_eq!(Some((1, 2)), ByteRangeSpec::Last(2).to_satisfiable_range(3));
assert_eq!(Some((2, 2)), ByteRangeSpec::Last(1).to_satisfiable_range(3));
assert_eq!(Some((0, 2)), ByteRangeSpec::Last(5).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::Last(0).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::Last(2).to_satisfiable_range(0));
}
}


@ -0,0 +1,385 @@
use std::collections::hash_map::{self, Entry};
use std::convert::TryFrom;
use either::Either;
use fxhash::FxHashMap;
use http::header::{HeaderName, HeaderValue};
/// A set of HTTP headers
///
/// `HeaderMap` is an multi-map of [`HeaderName`] to values.
///
/// [`HeaderName`]: struct.HeaderName.html
#[derive(Debug, Clone)]
pub struct HeaderMap {
pub(crate) inner: FxHashMap<HeaderName, Value>,
}
#[derive(Debug, Clone)]
pub(crate) enum Value {
One(HeaderValue),
Multi(Vec<HeaderValue>),
}
impl Value {
fn get(&self) -> &HeaderValue {
match self {
Value::One(ref val) => val,
Value::Multi(ref val) => &val[0],
}
}
fn get_mut(&mut self) -> &mut HeaderValue {
match self {
Value::One(ref mut val) => val,
Value::Multi(ref mut val) => &mut val[0],
}
}
fn append(&mut self, val: HeaderValue) {
match self {
Value::One(_) => {
let data = std::mem::replace(self, Value::Multi(vec![val]));
match data {
Value::One(val) => self.append(val),
Value::Multi(_) => unreachable!(),
}
}
Value::Multi(ref mut vec) => vec.push(val),
}
}
}
impl HeaderMap {
/// Create an empty `HeaderMap`.
///
/// The map will be created without any capacity. This function will not
/// allocate.
pub fn new() -> Self {
HeaderMap {
inner: FxHashMap::default(),
}
}
/// Create an empty `HeaderMap` with the specified capacity.
///
/// The returned map will allocate internal storage in order to hold about
/// `capacity` elements without reallocating. However, this is a "best
/// effort" as there are usage patterns that could cause additional
/// allocations before `capacity` headers are stored in the map.
///
/// More capacity than requested may be allocated.
pub fn with_capacity(capacity: usize) -> HeaderMap {
HeaderMap {
inner: FxHashMap::with_capacity_and_hasher(capacity, Default::default()),
}
}
/// Returns the number of keys stored in the map.
///
/// This number could be be less than or equal to actual headers stored in
/// the map.
pub fn len(&self) -> usize {
self.inner.len()
}
/// Returns true if the map contains no elements.
pub fn is_empty(&self) -> bool {
self.inner.len() == 0
}
/// Clears the map, removing all key-value pairs. Keeps the allocated memory
/// for reuse.
pub fn clear(&mut self) {
self.inner.clear();
}
/// Returns the number of headers the map can hold without reallocating.
///
/// This number is an approximation as certain usage patterns could cause
/// additional allocations before the returned capacity is filled.
pub fn capacity(&self) -> usize {
self.inner.capacity()
}
/// Reserves capacity for at least `additional` more headers to be inserted
/// into the `HeaderMap`.
///
/// The header map may reserve more space to avoid frequent reallocations.
/// Like with `with_capacity`, this will be a "best effort" to avoid
/// allocations until `additional` more headers are inserted. Certain usage
/// patterns could cause additional allocations before the number is
/// reached.
pub fn reserve(&mut self, additional: usize) {
self.inner.reserve(additional)
}
/// Returns a reference to the value associated with the key.
///
/// If there are multiple values associated with the key, then the first one
/// is returned. Use `get_all` to get all values associated with a given
/// key. Returns `None` if there are no values associated with the key.
pub fn get<N: AsName>(&self, name: N) -> Option<&HeaderValue> {
self.get2(name).map(|v| v.get())
}
fn get2<N: AsName>(&self, name: N) -> Option<&Value> {
match name.as_name() {
Either::Left(name) => self.inner.get(name),
Either::Right(s) => {
if let Ok(name) = HeaderName::try_from(s) {
self.inner.get(&name)
} else {
None
}
}
}
}
/// Returns a view of all values associated with a key.
///
/// The returned view does not incur any allocations and allows iterating
/// the values associated with the key. See [`GetAll`] for more details.
/// Returns `None` if there are no values associated with the key.
///
/// [`GetAll`]: struct.GetAll.html
pub fn get_all<N: AsName>(&self, name: N) -> GetAll<'_> {
GetAll {
idx: 0,
item: self.get2(name),
}
}
/// Returns a mutable reference to the value associated with the key.
///
/// If there are multiple values associated with the key, then the first one
/// is returned. Use `entry` to get all values associated with a given
/// key. Returns `None` if there are no values associated with the key.
pub fn get_mut<N: AsName>(&mut self, name: N) -> Option<&mut HeaderValue> {
match name.as_name() {
Either::Left(name) => self.inner.get_mut(name).map(|v| v.get_mut()),
Either::Right(s) => {
if let Ok(name) = HeaderName::try_from(s) {
self.inner.get_mut(&name).map(|v| v.get_mut())
} else {
None
}
}
}
}
/// Returns true if the map contains a value for the specified key.
pub fn contains_key<N: AsName>(&self, key: N) -> bool {
match key.as_name() {
Either::Left(name) => self.inner.contains_key(name),
Either::Right(s) => {
if let Ok(name) = HeaderName::try_from(s) {
self.inner.contains_key(&name)
} else {
false
}
}
}
}
/// An iterator visiting all key-value pairs.
///
/// The iteration order is arbitrary, but consistent across platforms for
/// the same crate version. Each key will be yielded once per associated
/// value. So, if a key has 3 associated values, it will be yielded 3 times.
pub fn iter(&self) -> Iter<'_> {
Iter::new(self.inner.iter())
}
/// An iterator visiting all keys.
///
/// The iteration order is arbitrary, but consistent across platforms for
/// the same crate version. Each key will be yielded only once even if it
/// has multiple associated values.
pub fn keys(&self) -> Keys<'_> {
Keys(self.inner.keys())
}
/// Inserts a key-value pair into the map.
///
/// If the map did not previously have this key present, then `None` is
/// returned.
///
/// If the map did have this key present, the new value is associated with
/// the key and all previous values are removed. **Note** that only a single
/// one of the previous values is returned. If there are multiple values
/// that have been previously associated with the key, then the first one is
/// returned. See `insert_mult` on `OccupiedEntry` for an API that returns
/// all values.
///
/// The key is not updated, though; this matters for types that can be `==`
/// without being identical.
pub fn insert(&mut self, key: HeaderName, val: HeaderValue) {
let _ = self.inner.insert(key, Value::One(val));
}
/// Inserts a key-value pair into the map.
///
/// If the map did not previously have this key present, then `false` is
/// returned.
///
/// If the map did have this key present, the new value is pushed to the end
/// of the list of values currently associated with the key. The key is not
/// updated, though; this matters for types that can be `==` without being
/// identical.
pub fn append(&mut self, key: HeaderName, value: HeaderValue) {
match self.inner.entry(key) {
Entry::Occupied(mut entry) => entry.get_mut().append(value),
Entry::Vacant(entry) => {
entry.insert(Value::One(value));
}
}
}
/// Removes all headers for a particular header name from the map.
pub fn remove<N: AsName>(&mut self, key: N) {
match key.as_name() {
Either::Left(name) => {
let _ = self.inner.remove(name);
}
Either::Right(s) => {
if let Ok(name) = HeaderName::try_from(s) {
let _ = self.inner.remove(&name);
}
}
}
}
}
#[doc(hidden)]
pub trait AsName {
fn as_name(&self) -> Either<&HeaderName, &str>;
}
impl AsName for HeaderName {
fn as_name(&self) -> Either<&HeaderName, &str> {
Either::Left(self)
}
}
impl<'a> AsName for &'a HeaderName {
fn as_name(&self) -> Either<&HeaderName, &str> {
Either::Left(self)
}
}
impl<'a> AsName for &'a str {
fn as_name(&self) -> Either<&HeaderName, &str> {
Either::Right(self)
}
}
impl AsName for String {
fn as_name(&self) -> Either<&HeaderName, &str> {
Either::Right(self.as_str())
}
}
impl<'a> AsName for &'a String {
fn as_name(&self) -> Either<&HeaderName, &str> {
Either::Right(self.as_str())
}
}
pub struct GetAll<'a> {
idx: usize,
item: Option<&'a Value>,
}
impl<'a> Iterator for GetAll<'a> {
type Item = &'a HeaderValue;
#[inline]
fn next(&mut self) -> Option<&'a HeaderValue> {
if let Some(ref val) = self.item {
match val {
Value::One(ref val) => {
self.item.take();
Some(val)
}
Value::Multi(ref vec) => {
if self.idx < vec.len() {
let item = Some(&vec[self.idx]);
self.idx += 1;
item
} else {
self.item.take();
None
}
}
}
} else {
None
}
}
}
pub struct Keys<'a>(hash_map::Keys<'a, HeaderName, Value>);
impl<'a> Iterator for Keys<'a> {
type Item = &'a HeaderName;
#[inline]
fn next(&mut self) -> Option<&'a HeaderName> {
self.0.next()
}
}
impl<'a> IntoIterator for &'a HeaderMap {
type Item = (&'a HeaderName, &'a HeaderValue);
type IntoIter = Iter<'a>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
pub struct Iter<'a> {
idx: usize,
current: Option<(&'a HeaderName, &'a Vec<HeaderValue>)>,
iter: hash_map::Iter<'a, HeaderName, Value>,
}
impl<'a> Iter<'a> {
fn new(iter: hash_map::Iter<'a, HeaderName, Value>) -> Self {
Self {
iter,
idx: 0,
current: None,
}
}
}
impl<'a> Iterator for Iter<'a> {
type Item = (&'a HeaderName, &'a HeaderValue);
#[inline]
fn next(&mut self) -> Option<(&'a HeaderName, &'a HeaderValue)> {
if let Some(ref mut item) = self.current {
if self.idx < item.1.len() {
let item = (item.0, &item.1[self.idx]);
self.idx += 1;
return Some(item);
} else {
self.idx = 0;
self.current.take();
}
}
if let Some(item) = self.iter.next() {
match item.1 {
Value::One(ref value) => Some((item.0, value)),
Value::Multi(ref vec) => {
self.current = Some((item.0, vec));
self.next()
}
}
} else {
None
}
}
}
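
For orientation, a small usage sketch of this multi-map, based only on the methods defined above: `insert` keeps a single value per name, `append` adds further values, and `get_all`/`iter` walk them. The names and values come from the `http` crate; the `demo` function is illustrative only.

use http::header::{HeaderValue, CONTENT_TYPE, SET_COOKIE};

fn demo() {
    let mut map = HeaderMap::new();

    // `insert` replaces any previous values for the name.
    map.insert(CONTENT_TYPE, HeaderValue::from_static("text/plain"));
    map.insert(CONTENT_TYPE, HeaderValue::from_static("application/json"));
    assert_eq!(map.get(CONTENT_TYPE).unwrap().to_str().unwrap(), "application/json");

    // `append` switches the entry to the Multi variant and keeps both values.
    map.append(SET_COOKIE, HeaderValue::from_static("a=1"));
    map.append(SET_COOKIE, HeaderValue::from_static("b=2"));
    assert_eq!(map.get_all(SET_COOKIE).count(), 2);

    // `iter` yields one (name, value) pair per stored value.
    for (name, value) in map.iter() {
        println!("{}: {:?}", name.as_str(), value);
    }
}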


@ -1,12 +1,13 @@
//! Various http headers
// This is mostly copy of [hyper](https://github.com/hyperium/hyper/tree/master/src/header)
use std::convert::TryFrom;
use std::{fmt, str::FromStr};
use bytes::{Bytes, BytesMut};
use http::header::GetAll;
use http::Error as HttpError;
use mime::Mime;
use percent_encoding::{AsciiSet, CONTROLS};
pub use http::header::*;
@ -14,13 +15,16 @@ use crate::error::ParseError;
use crate::httpmessage::HttpMessage;
mod common;
pub(crate) mod map;
mod shared;
#[doc(hidden)]
pub use self::common::*;
#[doc(hidden)]
pub use self::shared::*;
#[doc(hidden)]
pub use self::map::GetAll;
pub use self::map::HeaderMap;
/// A trait for any object that will represent a header field and value.
pub trait Header
where
@ -33,7 +37,6 @@ where
fn parse<T: HttpMessage>(msg: &T) -> Result<Self, ParseError>;
}
#[doc(hidden)]
/// A trait for any object that can be Converted to a `HeaderValue`
pub trait IntoHeaderValue: Sized {
/// The type returned in the event of a conversion error.
@ -71,38 +74,58 @@ impl<'a> IntoHeaderValue for &'a [u8] {
}
impl IntoHeaderValue for Bytes {
type Error = InvalidHeaderValueBytes;
type Error = InvalidHeaderValue;
#[inline]
fn try_into(self) -> Result<HeaderValue, Self::Error> {
HeaderValue::from_shared(self)
HeaderValue::from_maybe_shared(self)
}
}
impl IntoHeaderValue for Vec<u8> {
type Error = InvalidHeaderValueBytes;
type Error = InvalidHeaderValue;
#[inline]
fn try_into(self) -> Result<HeaderValue, Self::Error> {
HeaderValue::from_shared(Bytes::from(self))
HeaderValue::try_from(self)
}
}
impl IntoHeaderValue for String {
type Error = InvalidHeaderValueBytes;
type Error = InvalidHeaderValue;
#[inline]
fn try_into(self) -> Result<HeaderValue, Self::Error> {
HeaderValue::from_shared(Bytes::from(self))
HeaderValue::try_from(self)
}
}
impl IntoHeaderValue for usize {
type Error = InvalidHeaderValue;
#[inline]
fn try_into(self) -> Result<HeaderValue, Self::Error> {
let s = format!("{}", self);
HeaderValue::try_from(s)
}
}
impl IntoHeaderValue for u64 {
type Error = InvalidHeaderValue;
#[inline]
fn try_into(self) -> Result<HeaderValue, Self::Error> {
let s = format!("{}", self);
HeaderValue::try_from(s)
}
}
impl IntoHeaderValue for Mime {
type Error = InvalidHeaderValueBytes;
type Error = InvalidHeaderValue;
#[inline]
fn try_into(self) -> Result<HeaderValue, Self::Error> {
HeaderValue::from_shared(Bytes::from(format!("{}", self)))
HeaderValue::try_from(format!("{}", self))
}
}
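
All of these impls now delegate to `http` 0.2's own constructors: owned strings, byte vectors and stringified numbers go through `HeaderValue::try_from`, while `Bytes` buffers use `from_maybe_shared` to avoid a copy where possible. A short sketch of the underlying conversions (plain `http` crate usage, independent of the `IntoHeaderValue` trait above; the `demo` function is illustrative only):

use std::convert::TryFrom;
use http::header::HeaderValue;

fn demo() {
    // Owned strings are validated byte-by-byte.
    let ct = HeaderValue::try_from(String::from("text/plain")).unwrap();
    assert_eq!(ct.to_str().unwrap(), "text/plain");

    // Numbers are formatted first, then converted (as in the usize/u64 impls above).
    let len = HeaderValue::try_from(1024u64.to_string()).unwrap();
    assert_eq!(len.to_str().unwrap(), "1024");

    // Control bytes such as '\n' are rejected, which is why the conversion is fallible.
    assert!(HeaderValue::try_from(String::from("bad\nvalue")).is_err());
}
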
@ -125,10 +148,7 @@ impl ContentEncoding {
#[inline]
/// Is the content compressed?
pub fn is_compression(self) -> bool {
match self {
ContentEncoding::Identity | ContentEncoding::Auto => false,
_ => true,
}
matches!(self, ContentEncoding::Identity | ContentEncoding::Auto)
}
#[inline]
@ -182,7 +202,7 @@ impl Writer {
}
}
fn take(&mut self) -> Bytes {
self.buf.take().freeze()
self.buf.split().freeze()
}
}
@ -194,7 +214,7 @@ impl fmt::Write for Writer {
}
#[inline]
fn write_fmt(&mut self, args: fmt::Arguments) -> fmt::Result {
fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
fmt::write(self, args)
}
}
@ -202,8 +222,8 @@ impl fmt::Write for Writer {
#[inline]
#[doc(hidden)]
/// Reads a comma-delimited raw header into a Vec.
pub fn from_comma_delimited<T: FromStr>(
all: GetAll<HeaderValue>,
pub fn from_comma_delimited<'a, I: Iterator<Item = &'a HeaderValue> + 'a, T: FromStr>(
all: I,
) -> Result<Vec<T>, ParseError> {
let mut result = Vec::new();
for h in all {
@ -236,7 +256,7 @@ pub fn from_one_raw_str<T: FromStr>(val: Option<&HeaderValue>) -> Result<T, Pars
#[inline]
#[doc(hidden)]
/// Format an array into a comma-delimited string.
pub fn fmt_comma_delimited<T>(f: &mut fmt::Formatter, parts: &[T]) -> fmt::Result
pub fn fmt_comma_delimited<T>(f: &mut fmt::Formatter<'_>, parts: &[T]) -> fmt::Result
where
T: fmt::Display,
{
@ -338,11 +358,9 @@ pub fn parse_extended_value(
}
impl fmt::Display for ExtendedValue {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let encoded_value = percent_encoding::percent_encode(
&self.value[..],
self::percent_encoding_http::HTTP_VALUE,
);
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let encoded_value =
percent_encoding::percent_encode(&self.value[..], HTTP_VALUE);
if let Some(ref lang) = self.language_tag {
write!(f, "{}'{}'{}", self.charset, lang, encoded_value)
} else {
@ -355,27 +373,46 @@ impl fmt::Display for ExtendedValue {
/// [https://tools.ietf.org/html/rfc5987#section-3.2][url]
///
/// [url]: https://tools.ietf.org/html/rfc5987#section-3.2
pub fn http_percent_encode(f: &mut fmt::Formatter, bytes: &[u8]) -> fmt::Result {
let encoded =
percent_encoding::percent_encode(bytes, self::percent_encoding_http::HTTP_VALUE);
pub fn http_percent_encode(f: &mut fmt::Formatter<'_>, bytes: &[u8]) -> fmt::Result {
let encoded = percent_encoding::percent_encode(bytes, HTTP_VALUE);
fmt::Display::fmt(&encoded, f)
}
mod percent_encoding_http {
use percent_encoding::{self, define_encode_set};
// internal module because macro is hard-coded to make a public item
// but we don't want to public export this item
define_encode_set! {
// This encode set is used for HTTP header values and is defined at
// https://tools.ietf.org/html/rfc5987#section-3.2
pub HTTP_VALUE = [percent_encoding::SIMPLE_ENCODE_SET] | {
' ', '"', '%', '\'', '(', ')', '*', ',', '/', ':', ';', '<', '-', '>', '?',
'[', '\\', ']', '{', '}'
/// Convert http::HeaderMap to a HeaderMap
impl From<http::HeaderMap> for HeaderMap {
fn from(map: http::HeaderMap) -> HeaderMap {
let mut new_map = HeaderMap::with_capacity(map.capacity());
for (h, v) in map.iter() {
new_map.append(h.clone(), v.clone());
}
new_map
}
}
// This encode set is used for HTTP header values and is defined at
// https://tools.ietf.org/html/rfc5987#section-3.2
pub(crate) const HTTP_VALUE: &AsciiSet = &CONTROLS
.add(b' ')
.add(b'"')
.add(b'%')
.add(b'\'')
.add(b'(')
.add(b')')
.add(b'*')
.add(b',')
.add(b'/')
.add(b':')
.add(b';')
.add(b'<')
.add(b'-')
.add(b'>')
.add(b'?')
.add(b'[')
.add(b'\\')
.add(b']')
.add(b'{')
.add(b'}');
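
The `define_encode_set!` macro from percent-encoding 1.x is gone; the set of bytes to escape is now a `const AsciiSet` built up from `CONTROLS`, as above. A small sketch of how such a set is used (the same call shape as `http_percent_encode` and the `ExtendedValue` `Display` impl earlier in this file); `DEMO_SET` is a trimmed stand-in, not the full HTTP_VALUE list:

use percent_encoding::{percent_encode, AsciiSet, CONTROLS};

// Control bytes plus a few of the delimiters RFC 5987 requires to be escaped.
const DEMO_SET: &AsciiSet = &CONTROLS.add(b' ').add(b'"').add(b'%').add(b'\'');

fn demo() {
    // `percent_encode` is lazy and implements Display, so it can be written
    // straight into a formatter or collected into a String.
    let encoded = percent_encode(b"100% \"sure\"", DEMO_SET).to_string();
    assert_eq!(encoded, "100%25%20%22sure%22");
}
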
#[cfg(test)]
mod tests {
use super::shared::Charset;


@ -98,7 +98,7 @@ impl Charset {
}
impl Display for Charset {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(self.label())
}
}
@ -137,17 +137,22 @@ impl FromStr for Charset {
}
}
#[test]
fn test_parse() {
assert_eq!(Us_Ascii, "us-ascii".parse().unwrap());
assert_eq!(Us_Ascii, "US-Ascii".parse().unwrap());
assert_eq!(Us_Ascii, "US-ASCII".parse().unwrap());
assert_eq!(Shift_Jis, "Shift-JIS".parse().unwrap());
assert_eq!(Ext("ABCD".to_owned()), "abcd".parse().unwrap());
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_display() {
assert_eq!("US-ASCII", format!("{}", Us_Ascii));
assert_eq!("ABCD", format!("{}", Ext("ABCD".to_owned())));
#[test]
fn test_parse() {
assert_eq!(Us_Ascii, "us-ascii".parse().unwrap());
assert_eq!(Us_Ascii, "US-Ascii".parse().unwrap());
assert_eq!(Us_Ascii, "US-ASCII".parse().unwrap());
assert_eq!(Shift_Jis, "Shift-JIS".parse().unwrap());
assert_eq!(Ext("ABCD".to_owned()), "abcd".parse().unwrap());
}
#[test]
fn test_display() {
assert_eq!("US-ASCII", format!("{}", Us_Ascii));
assert_eq!("ABCD", format!("{}", Ext("ABCD".to_owned())));
}
}


@ -27,7 +27,7 @@ pub enum Encoding {
}
impl fmt::Display for Encoding {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.write_str(match *self {
Chunked => "chunked",
Brotli => "br",


@ -1,7 +1,7 @@
use std::fmt::{self, Display, Write};
use std::str::FromStr;
use crate::header::{HeaderValue, IntoHeaderValue, InvalidHeaderValueBytes, Writer};
use crate::header::{HeaderValue, IntoHeaderValue, InvalidHeaderValue, Writer};
/// check that each char in the slice is either:
/// 1. `%x21`, or
@ -113,7 +113,7 @@ impl EntityTag {
}
impl Display for EntityTag {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
if self.weak {
write!(f, "W/\"{}\"", self.tag)
} else {
@ -157,12 +157,12 @@ impl FromStr for EntityTag {
}
impl IntoHeaderValue for EntityTag {
type Error = InvalidHeaderValueBytes;
type Error = InvalidHeaderValue;
fn try_into(self) -> Result<HeaderValue, Self::Error> {
let mut wrt = Writer::new();
write!(wrt, "{}", self).unwrap();
HeaderValue::from_shared(wrt.take())
HeaderValue::from_maybe_shared(wrt.take())
}
}


@ -1,117 +1,99 @@
use std::fmt::{self, Display};
use std::io::Write;
use std::str::FromStr;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use std::time::{SystemTime, UNIX_EPOCH};
use bytes::{BufMut, BytesMut};
use http::header::{HeaderValue, InvalidHeaderValueBytes};
use bytes::{buf::BufMutExt, BytesMut};
use http::header::{HeaderValue, InvalidHeaderValue};
use time::{offset, OffsetDateTime, PrimitiveDateTime};
use crate::error::ParseError;
use crate::header::IntoHeaderValue;
use crate::time_parser;
/// A timestamp with HTTP formatting and parsing
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct HttpDate(time::Tm);
pub struct HttpDate(OffsetDateTime);
impl FromStr for HttpDate {
type Err = ParseError;
fn from_str(s: &str) -> Result<HttpDate, ParseError> {
match time::strptime(s, "%a, %d %b %Y %T %Z")
.or_else(|_| time::strptime(s, "%A, %d-%b-%y %T %Z"))
.or_else(|_| time::strptime(s, "%c"))
{
Ok(t) => Ok(HttpDate(t)),
Err(_) => Err(ParseError::Header),
match time_parser::parse_http_date(s) {
Some(t) => Ok(HttpDate(t.assume_utc())),
None => Err(ParseError::Header),
}
}
}
impl Display for HttpDate {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fmt::Display::fmt(&self.0.to_utc().rfc822(), f)
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.0.format("%a, %d %b %Y %H:%M:%S GMT"), f)
}
}
impl From<time::Tm> for HttpDate {
fn from(tm: time::Tm) -> HttpDate {
HttpDate(tm)
impl From<OffsetDateTime> for HttpDate {
fn from(dt: OffsetDateTime) -> HttpDate {
HttpDate(dt)
}
}
impl From<SystemTime> for HttpDate {
fn from(sys: SystemTime) -> HttpDate {
let tmspec = match sys.duration_since(UNIX_EPOCH) {
Ok(dur) => {
time::Timespec::new(dur.as_secs() as i64, dur.subsec_nanos() as i32)
}
Err(err) => {
let neg = err.duration();
time::Timespec::new(
-(neg.as_secs() as i64),
-(neg.subsec_nanos() as i32),
)
}
};
HttpDate(time::at_utc(tmspec))
HttpDate(PrimitiveDateTime::from(sys).assume_utc())
}
}
impl IntoHeaderValue for HttpDate {
type Error = InvalidHeaderValueBytes;
type Error = InvalidHeaderValue;
fn try_into(self) -> Result<HeaderValue, Self::Error> {
let mut wrt = BytesMut::with_capacity(29).writer();
write!(wrt, "{}", self.0.rfc822()).unwrap();
HeaderValue::from_shared(wrt.get_mut().take().freeze())
write!(
wrt,
"{}",
self.0
.to_offset(offset!(UTC))
.format("%a, %d %b %Y %H:%M:%S GMT")
)
.unwrap();
HeaderValue::from_maybe_shared(wrt.get_mut().split().freeze())
}
}
impl From<HttpDate> for SystemTime {
fn from(date: HttpDate) -> SystemTime {
let spec = date.0.to_timespec();
if spec.sec >= 0 {
UNIX_EPOCH + Duration::new(spec.sec as u64, spec.nsec as u32)
} else {
UNIX_EPOCH - Duration::new(spec.sec as u64, spec.nsec as u32)
}
let dt = date.0;
let epoch = OffsetDateTime::unix_epoch();
UNIX_EPOCH + (dt - epoch)
}
}
#[cfg(test)]
mod tests {
use super::HttpDate;
use time::Tm;
const NOV_07: HttpDate = HttpDate(Tm {
tm_nsec: 0,
tm_sec: 37,
tm_min: 48,
tm_hour: 8,
tm_mday: 7,
tm_mon: 10,
tm_year: 94,
tm_wday: 0,
tm_isdst: 0,
tm_yday: 0,
tm_utcoff: 0,
});
use time::{date, time, PrimitiveDateTime};
#[test]
fn test_date() {
let nov_07 = HttpDate(
PrimitiveDateTime::new(date!(1994 - 11 - 07), time!(8:48:37)).assume_utc(),
);
assert_eq!(
"Sun, 07 Nov 1994 08:48:37 GMT".parse::<HttpDate>().unwrap(),
NOV_07
nov_07
);
assert_eq!(
"Sunday, 07-Nov-94 08:48:37 GMT"
.parse::<HttpDate>()
.unwrap(),
NOV_07
nov_07
);
assert_eq!(
"Sun Nov 7 08:48:37 1994".parse::<HttpDate>().unwrap(),
NOV_07
nov_07
);
assert!("this-is-no-date".parse::<HttpDate>().is_err());
}


@ -53,7 +53,7 @@ impl<T: PartialEq> cmp::PartialOrd for QualityItem<T> {
}
impl<T: fmt::Display> fmt::Display for QualityItem<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.item, f)?;
match self.quality.0 {
1000 => Ok(()),


@ -1,208 +1,171 @@
use std::io;
use bytes::{BufMut, BytesMut};
use http::Version;
use std::{mem, ptr, slice};
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
use crate::extensions::Extensions;
pub(crate) const STATUS_LINE_BUF_SIZE: usize = 13;
const DIGITS_START: u8 = b'0';
pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) {
let mut buf: [u8; STATUS_LINE_BUF_SIZE] = [
b'H', b'T', b'T', b'P', b'/', b'1', b'.', b'1', b' ', b' ', b' ', b' ', b' ',
];
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_2 => buf[5] = b'2',
Version::HTTP_10 => buf[7] = b'0',
Version::HTTP_09 => {
buf[5] = b'0';
buf[7] = b'9';
}
_ => (),
}
let mut curr: isize = 12;
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let four = n > 999;
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
bytes.put_slice(&buf);
if four {
bytes.put(b' ');
}
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
// trailing space before reason
bytes.put_u8(b' ');
}
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
if n < 10 {
let mut buf: [u8; 21] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l', b'e',
b'n', b'g', b't', b'h', b':', b' ', b'0', b'\r', b'\n',
];
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else if n < 100 {
let mut buf: [u8; 22] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l', b'e',
b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'\r', b'\n',
];
let d1 = n << 1;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(18),
2,
);
}
bytes.put_slice(&buf);
} else if n < 1000 {
let mut buf: [u8; 23] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l', b'e',
b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'0', b'\r', b'\n',
];
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(19),
2,
)
};
pub fn write_content_length(n: u64, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
// decode last 1
buf[18] = (n as u8) + b'0';
let mut buf = itoa::Buffer::new();
bytes.put_slice(&buf);
} else {
bytes.put_slice(b"\r\ncontent-length: ");
convert_usize(n, bytes);
bytes.put_slice(b"\r\ncontent-length: ");
bytes.put_slice(buf.format(n).as_bytes());
bytes.put_slice(b"\r\n");
}
pub(crate) struct Writer<'a>(pub &'a mut BytesMut);
impl<'a> io::Write for Writer<'a> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.0.extend_from_slice(buf);
Ok(buf.len())
}
fn flush(&mut self) -> io::Result<()> {
Ok(())
}
}
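
The `Writer` adapter above exists so that `write!` can target a `BytesMut` through the `io::Write` trait. A small standalone sketch of the same pattern (the imports and the example format string are assumptions, not part of the diff):

```rust
// Sketch of the Writer adapter pattern: append formatted text into a BytesMut.
use std::io::{self, Write};

use bytes::BytesMut;

struct Writer<'a>(&'a mut BytesMut);

impl<'a> io::Write for Writer<'a> {
    fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
        self.0.extend_from_slice(buf);
        Ok(buf.len())
    }

    fn flush(&mut self) -> io::Result<()> {
        Ok(())
    }
}

fn main() {
    let mut bytes = BytesMut::new();
    write!(Writer(&mut bytes), "content-length: {}", 42).unwrap();
    assert_eq!(&bytes[..], &b"content-length: 42"[..]);
}
```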
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
let mut curr: isize = 39;
let mut buf: [u8; 41] = unsafe { mem::uninitialized() };
buf[39] = b'\r';
buf[40] = b'\n';
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
pub(crate) trait DataFactory {
fn set(&self, ext: &mut Extensions);
}
// eagerly decode 4 characters at a time
while n >= 10_000 {
let rem = (n % 10_000) as isize;
n /= 10_000;
pub(crate) struct Data<T>(pub(crate) T);
let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1;
curr -= 4;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2);
}
}
// if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
unsafe {
bytes.extend_from_slice(slice::from_raw_parts(
buf_ptr.offset(curr),
41 - curr as usize,
));
impl<T: Clone + 'static> DataFactory for Data<T> {
fn set(&self, ext: &mut Extensions) {
ext.insert(self.0.clone())
}
}
#[cfg(test)]
mod tests {
use std::str::from_utf8;
use super::*;
#[test]
fn test_status_line() {
let mut bytes = BytesMut::new();
bytes.reserve(50);
write_status_line(Version::HTTP_11, 200, &mut bytes);
assert_eq!(from_utf8(&bytes.split().freeze()).unwrap(), "HTTP/1.1 200 ");
let mut bytes = BytesMut::new();
bytes.reserve(50);
write_status_line(Version::HTTP_09, 404, &mut bytes);
assert_eq!(from_utf8(&bytes.split().freeze()).unwrap(), "HTTP/0.9 404 ");
let mut bytes = BytesMut::new();
bytes.reserve(50);
write_status_line(Version::HTTP_09, 515, &mut bytes);
assert_eq!(from_utf8(&bytes.split().freeze()).unwrap(), "HTTP/0.9 515 ");
}
#[test]
fn test_write_content_length() {
let mut bytes = BytesMut::new();
bytes.reserve(50);
write_content_length(0, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 0\r\n"[..]);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 0\r\n"[..]);
bytes.reserve(50);
write_content_length(9, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 9\r\n"[..]);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 9\r\n"[..]);
bytes.reserve(50);
write_content_length(10, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 10\r\n"[..]);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 10\r\n"[..]);
bytes.reserve(50);
write_content_length(99, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 99\r\n"[..]);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 99\r\n"[..]);
bytes.reserve(50);
write_content_length(100, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 100\r\n"[..]);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 100\r\n"[..]);
bytes.reserve(50);
write_content_length(101, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 101\r\n"[..]);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 101\r\n"[..]);
bytes.reserve(50);
write_content_length(998, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 998\r\n"[..]);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 998\r\n"[..]);
bytes.reserve(50);
write_content_length(1000, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 1000\r\n"[..]);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 1000\r\n"[..]);
bytes.reserve(50);
write_content_length(1001, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 1001\r\n"[..]);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 1001\r\n"[..]);
bytes.reserve(50);
write_content_length(5909, &mut bytes);
assert_eq!(bytes.take().freeze(), b"\r\ncontent-length: 5909\r\n"[..]);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 5909\r\n"[..]);
bytes.reserve(50);
write_content_length(9999, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 9999\r\n"[..]);
bytes.reserve(50);
write_content_length(10001, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 10001\r\n"[..]);
bytes.reserve(50);
write_content_length(59094, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 59094\r\n"[..]);
bytes.reserve(50);
write_content_length(99999, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 99999\r\n"[..]);
bytes.reserve(50);
write_content_length(590947, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 590947\r\n"[..]
);
bytes.reserve(50);
write_content_length(999999, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 999999\r\n"[..]
);
bytes.reserve(50);
write_content_length(5909471, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 5909471\r\n"[..]
);
bytes.reserve(50);
write_content_length(59094718, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 59094718\r\n"[..]
);
bytes.reserve(50);
write_content_length(4294973728, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 4294973728\r\n"[..]
);
}
}
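
The rewritten `write_content_length` above replaces the hand-rolled digit lookup table and `unsafe` pointer copies with the `itoa` crate. A hedged, standalone version of that approach (the `main` wrapper and crate versions are assumptions):

```rust
// Essentially the itoa-based helper from this diff, made standalone.
use bytes::{BufMut, BytesMut};

fn write_content_length(n: u64, bytes: &mut BytesMut) {
    if n == 0 {
        bytes.put_slice(b"\r\ncontent-length: 0\r\n");
        return;
    }

    // itoa formats the integer into a stack buffer without any unsafe code.
    let mut buf = itoa::Buffer::new();

    bytes.put_slice(b"\r\ncontent-length: ");
    bytes.put_slice(buf.format(n).as_bytes());
    bytes.put_slice(b"\r\n");
}

fn main() {
    let mut bytes = BytesMut::with_capacity(64);
    write_content_length(59_094_718, &mut bytes);
    assert_eq!(&bytes[..], &b"\r\ncontent-length: 59094718\r\n"[..]);
}
```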


@ -8,7 +8,7 @@ macro_rules! STATIC_RESP {
($name:ident, $status:expr) => {
#[allow(non_snake_case, missing_docs)]
pub fn $name() -> ResponseBuilder {
Response::build($status)
ResponseBuilder::new($status)
}
};
}
@ -29,7 +29,6 @@ impl Response {
STATIC_RESP!(AlreadyReported, StatusCode::ALREADY_REPORTED);
STATIC_RESP!(MultipleChoices, StatusCode::MULTIPLE_CHOICES);
STATIC_RESP!(MovedPermanenty, StatusCode::MOVED_PERMANENTLY);
STATIC_RESP!(MovedPermanently, StatusCode::MOVED_PERMANENTLY);
STATIC_RESP!(Found, StatusCode::FOUND);
STATIC_RESP!(SeeOther, StatusCode::SEE_OTHER);
@ -60,6 +59,8 @@ impl Response {
STATIC_RESP!(UnsupportedMediaType, StatusCode::UNSUPPORTED_MEDIA_TYPE);
STATIC_RESP!(RangeNotSatisfiable, StatusCode::RANGE_NOT_SATISFIABLE);
STATIC_RESP!(ExpectationFailed, StatusCode::EXPECTATION_FAILED);
STATIC_RESP!(UnprocessableEntity, StatusCode::UNPROCESSABLE_ENTITY);
STATIC_RESP!(TooManyRequests, StatusCode::TOO_MANY_REQUESTS);
STATIC_RESP!(InternalServerError, StatusCode::INTERNAL_SERVER_ERROR);
STATIC_RESP!(NotImplemented, StatusCode::NOT_IMPLEMENTED);


@ -1,23 +1,16 @@
use std::cell::{Ref, RefMut};
use std::str;
use encoding::all::UTF_8;
use encoding::label::encoding_from_whatwg_label;
use encoding::EncodingRef;
use http::{header, HeaderMap};
use encoding_rs::{Encoding, UTF_8};
use http::header;
use mime::Mime;
use crate::error::{ContentTypeError, ParseError};
use crate::cookie::Cookie;
use crate::error::{ContentTypeError, CookieParseError, ParseError};
use crate::extensions::Extensions;
use crate::header::Header;
use crate::header::{Header, HeaderMap};
use crate::payload::Payload;
#[cfg(feature = "cookies")]
use crate::error::CookieParseError;
#[cfg(feature = "cookies")]
use cookie::Cookie;
#[cfg(feature = "cookies")]
struct Cookies(Vec<Cookie<'static>>);
/// Trait that implements general purpose operations on http messages
@ -32,10 +25,10 @@ pub trait HttpMessage: Sized {
fn take_payload(&mut self) -> Payload<Self::Stream>;
/// Request's extensions container
fn extensions(&self) -> Ref<Extensions>;
fn extensions(&self) -> Ref<'_, Extensions>;
/// Mutable reference to the request's extensions container
fn extensions_mut(&self) -> RefMut<Extensions>;
fn extensions_mut(&self) -> RefMut<'_, Extensions>;
#[doc(hidden)]
/// Get a header
@ -64,10 +57,12 @@ pub trait HttpMessage: Sized {
/// Get content type encoding
///
/// UTF-8 is used by default if the request charset is not set.
fn encoding(&self) -> Result<EncodingRef, ContentTypeError> {
fn encoding(&self) -> Result<&'static Encoding, ContentTypeError> {
if let Some(mime_type) = self.mime_type()? {
if let Some(charset) = mime_type.get_param("charset") {
if let Some(enc) = encoding_from_whatwg_label(charset.as_str()) {
if let Some(enc) =
Encoding::for_label_no_replacement(charset.as_str().as_bytes())
{
Ok(enc)
} else {
Err(ContentTypeError::UnknownEncoding)
@ -110,8 +105,7 @@ pub trait HttpMessage: Sized {
/// Load request cookies.
#[inline]
#[cfg(feature = "cookies")]
fn cookies(&self) -> Result<Ref<Vec<Cookie<'static>>>, CookieParseError> {
fn cookies(&self) -> Result<Ref<'_, Vec<Cookie<'static>>>, CookieParseError> {
if self.extensions().get::<Cookies>().is_none() {
let mut cookies = Vec::new();
for hdr in self.headers().get_all(header::COOKIE) {
@ -131,7 +125,6 @@ pub trait HttpMessage: Sized {
}
/// Return request cookie.
#[cfg(feature = "cookies")]
fn cookie(&self, name: &str) -> Option<Cookie<'static>> {
if let Ok(cookies) = self.cookies() {
for cookie in cookies.iter() {
@ -160,12 +153,12 @@ where
}
/// Request's extensions container
fn extensions(&self) -> Ref<Extensions> {
fn extensions(&self) -> Ref<'_, Extensions> {
(**self).extensions()
}
/// Mutable reference to the request's extensions container
fn extensions_mut(&self) -> RefMut<Extensions> {
fn extensions_mut(&self) -> RefMut<'_, Extensions> {
(**self).extensions_mut()
}
}
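
The new `encoding()` method resolves the charset parameter through `encoding_rs` instead of the old `encoding` crate. A hedged sketch of that lookup on its own (the function name and the `main` wrapper are illustrative, not part of the diff):

```rust
// Charset lookup as done by the new encoding() method (assumes encoding_rs and mime).
use encoding_rs::{Encoding, UTF_8};
use mime::Mime;

fn charset_encoding(mime_type: &Mime) -> Option<&'static Encoding> {
    match mime_type.get_param("charset") {
        // Unknown labels yield None instead of silently falling back.
        Some(charset) => Encoding::for_label_no_replacement(charset.as_str().as_bytes()),
        // UTF-8 is the default when no charset parameter is present.
        None => Some(UTF_8),
    }
}

fn main() {
    let mime: Mime = "application/json; charset=ISO-8859-2".parse().unwrap();
    assert_eq!(charset_encoding(&mime), Some(encoding_rs::ISO_8859_2));

    let plain: Mime = "text/plain".parse().unwrap();
    assert_eq!(charset_encoding(&plain), Some(UTF_8));
}
```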
@ -173,9 +166,7 @@ where
#[cfg(test)]
mod tests {
use bytes::Bytes;
use encoding::all::ISO_8859_2;
use encoding::Encoding;
use mime;
use encoding_rs::ISO_8859_2;
use super::*;
use crate::test::TestRequest;
@ -230,7 +221,7 @@ mod tests {
"application/json; charset=ISO-8859-2",
)
.finish();
assert_eq!(ISO_8859_2.name(), req.encoding().unwrap().name());
assert_eq!(ISO_8859_2, req.encoding().unwrap());
}
#[test]


@ -1,17 +1,25 @@
//! Basic http primitives for actix-net framework.
#![deny(rust_2018_idioms)]
#![allow(
clippy::type_complexity,
clippy::too_many_arguments,
clippy::new_without_default,
clippy::new_without_default_derive
clippy::borrow_interior_mutable_const
)]
#[macro_use]
extern crate log;
#[macro_use]
mod macros;
pub mod body;
mod builder;
pub mod client;
mod cloneable;
mod config;
#[cfg(feature = "compress")]
pub mod encoding;
mod extensions;
mod header;
@ -23,7 +31,9 @@ mod payload;
mod request;
mod response;
mod service;
mod time_parser;
pub use cookie;
pub mod error;
pub mod h1;
pub mod h2;
@ -35,27 +45,23 @@ pub use self::config::{KeepAlive, ServiceConfig};
pub use self::error::{Error, ResponseError, Result};
pub use self::extensions::Extensions;
pub use self::httpmessage::HttpMessage;
pub use self::message::{Message, RequestHead, ResponseHead};
pub use self::message::{Message, RequestHead, RequestHeadType, ResponseHead};
pub use self::payload::{Payload, PayloadStream};
pub use self::request::Request;
pub use self::response::{Response, ResponseBuilder};
pub use self::service::{HttpService, SendError, SendResponse};
pub use self::service::HttpService;
pub mod http {
//! Various HTTP related types
// re-exports
pub use http::header::{HeaderName, HeaderValue};
pub use http::uri::PathAndQuery;
pub use http::{uri, Error, Uri};
pub use http::{Method, StatusCode, Version};
#[doc(hidden)]
pub use http::{uri, Error, HeaderMap, HttpTryFrom, Uri};
#[doc(hidden)]
pub use http::uri::PathAndQuery;
#[cfg(feature = "cookies")]
pub use cookie::{Cookie, CookieBuilder};
pub use crate::cookie::{Cookie, CookieBuilder};
pub use crate::header::HeaderMap;
/// Various http headers
pub mod header {
@ -64,3 +70,10 @@ pub mod http {
pub use crate::header::ContentEncoding;
pub use crate::message::ConnectionType;
}
/// Http protocol
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum Protocol {
Http1,
Http2,
}

actix-http/src/macros.rs (new file)

@ -0,0 +1,96 @@
#[macro_export]
macro_rules! downcast_get_type_id {
() => {
/// A helper method to get the type ID of the type
/// this trait is implemented on.
/// This method is unsafe to *implement*, since `downcast_ref` relies
/// on the returned `TypeId` to perform a cast.
///
/// Unfortunately, Rust has no notion of a trait method that is
/// unsafe to implement (marking it as `unsafe` makes it unsafe
/// to *call*). As a workaround, we require this method
/// to return a private type along with the `TypeId`. This
/// private type (`PrivateHelper`) has a private constructor,
/// making it impossible for safe code to construct outside of
/// this module. This ensures that safe code cannot violate
/// type-safety by implementing this method.
#[doc(hidden)]
fn __private_get_type_id__(&self) -> (std::any::TypeId, PrivateHelper)
where
Self: 'static,
{
(std::any::TypeId::of::<Self>(), PrivateHelper(()))
}
};
}
// Generate implementation for `dyn $name`
#[macro_export]
macro_rules! downcast {
($name:ident) => {
/// A struct with a private constructor, for use with
/// `__private_get_type_id__`. Its single field is private,
/// ensuring that it can only be constructed from this module
#[doc(hidden)]
pub struct PrivateHelper(());
impl dyn $name + 'static {
/// Downcasts generic body to a specific type.
pub fn downcast_ref<T: $name + 'static>(&self) -> Option<&T> {
if self.__private_get_type_id__().0 == std::any::TypeId::of::<T>() {
// SAFETY: external crates cannot override the default
// implementation of `__private_get_type_id__`, since
// it requires returning a private type. We can therefore
// rely on the returned `TypeId`, which ensures that this
// case is correct.
unsafe { Some(&*(self as *const dyn $name as *const T)) }
} else {
None
}
}
/// Downcasts a generic body to a mutable specific type.
pub fn downcast_mut<T: $name + 'static>(&mut self) -> Option<&mut T> {
if self.__private_get_type_id__().0 == std::any::TypeId::of::<T>() {
// SAFETY: external crates cannot override the default
// implementation of `__private_get_type_id__`, since
// it requires returning a private type. We can therefore
// rely on the returned `TypeId`, which ensures that this
// case is correct.
unsafe {
Some(&mut *(self as *const dyn $name as *const T as *mut T))
}
} else {
None
}
}
}
};
}
#[cfg(test)]
mod tests {
trait MB {
downcast_get_type_id!();
}
downcast!(MB);
impl MB for String {}
impl MB for () {}
#[actix_rt::test]
async fn test_any_casting() {
let mut body = String::from("hello cast");
let resp_body: &mut dyn MB = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push('!');
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
}
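
The macro pair above implements manual downcasting for trait objects by comparing `TypeId`s. The simplified sketch below shows the core idea without the macro; note that the real macro also returns the private `PrivateHelper` type so downstream impls cannot override the type-id method, a soundness detail omitted here (the trait and method names are illustrative):

```rust
// Simplified TypeId-based downcast, mirroring what downcast!/downcast_get_type_id!
// generate (without the PrivateHelper soundness guard).
use std::any::TypeId;

trait Body {
    fn private_type_id(&self) -> TypeId
    where
        Self: 'static,
    {
        TypeId::of::<Self>()
    }
}

impl Body for String {}
impl Body for () {}

impl dyn Body + 'static {
    fn downcast_ref<T: Body + 'static>(&self) -> Option<&T> {
        if self.private_type_id() == TypeId::of::<T>() {
            // SAFETY: the TypeId comparison above proves the concrete type is T.
            unsafe { Some(&*(self as *const dyn Body as *const T)) }
        } else {
            None
        }
    }
}

fn main() {
    let body = String::from("hello cast");
    let dyn_body: &dyn Body = &body;

    assert_eq!(dyn_body.downcast_ref::<String>().unwrap(), "hello cast");
    assert!(dyn_body.downcast_ref::<()>().is_none());
}
```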


@ -1,11 +1,13 @@
use std::cell::{Ref, RefCell, RefMut};
use std::collections::VecDeque;
use std::net;
use std::rc::Rc;
use bitflags::bitflags;
use copyless::BoxHelper;
use crate::extensions::Extensions;
use crate::http::{header, HeaderMap, Method, StatusCode, Uri, Version};
use crate::header::HeaderMap;
use crate::http::{header, Method, StatusCode, Uri, Version};
/// Represents various types of connection
#[derive(Copy, Clone, PartialEq, Debug)]
@ -23,7 +25,9 @@ bitflags! {
const CLOSE = 0b0000_0001;
const KEEP_ALIVE = 0b0000_0010;
const UPGRADE = 0b0000_0100;
const NO_CHUNKING = 0b0000_1000;
const EXPECT = 0b0000_1000;
const NO_CHUNKING = 0b0001_0000;
const CAMEL_CASE = 0b0010_0000;
}
}
@ -41,6 +45,7 @@ pub struct RequestHead {
pub version: Version,
pub headers: HeaderMap,
pub extensions: RefCell<Extensions>,
pub peer_addr: Option<net::SocketAddr>,
flags: Flags,
}
@ -52,6 +57,7 @@ impl Default for RequestHead {
version: Version::HTTP_11,
headers: HeaderMap::with_capacity(16),
flags: Flags::empty(),
peer_addr: None,
extensions: RefCell::new(Extensions::new()),
}
}
@ -72,13 +78,13 @@ impl Head for RequestHead {
impl RequestHead {
/// Message extensions
#[inline]
pub fn extensions(&self) -> Ref<Extensions> {
pub fn extensions(&self) -> Ref<'_, Extensions> {
self.extensions.borrow()
}
/// Mutable reference to the message's extensions
#[inline]
pub fn extensions_mut(&self) -> RefMut<Extensions> {
pub fn extensions_mut(&self) -> RefMut<'_, Extensions> {
self.extensions.borrow_mut()
}
@ -92,6 +98,23 @@ impl RequestHead {
&mut self.headers
}
/// Whether headers should be sent in Camel-Case form.
/// Default is `false`.
#[inline]
pub fn camel_case_headers(&self) -> bool {
self.flags.contains(Flags::CAMEL_CASE)
}
/// Set `true` to send headers which are formatted as Camel-Case.
#[inline]
pub fn set_camel_case_headers(&mut self, val: bool) {
if val {
self.flags.insert(Flags::CAMEL_CASE);
} else {
self.flags.remove(Flags::CAMEL_CASE);
}
}
#[inline]
/// Set connection type of the message
pub fn set_connection_type(&mut self, ctype: ConnectionType) {
@ -145,6 +168,47 @@ impl RequestHead {
self.flags.remove(Flags::NO_CHUNKING);
}
}
#[inline]
/// Request contains `EXPECT` header
pub fn expect(&self) -> bool {
self.flags.contains(Flags::EXPECT)
}
#[inline]
pub(crate) fn set_expect(&mut self) {
self.flags.insert(Flags::EXPECT);
}
}
#[derive(Debug)]
pub enum RequestHeadType {
Owned(RequestHead),
Rc(Rc<RequestHead>, Option<HeaderMap>),
}
impl RequestHeadType {
pub fn extra_headers(&self) -> Option<&HeaderMap> {
match self {
RequestHeadType::Owned(_) => None,
RequestHeadType::Rc(_, headers) => headers.as_ref(),
}
}
}
impl AsRef<RequestHead> for RequestHeadType {
fn as_ref(&self) -> &RequestHead {
match self {
RequestHeadType::Owned(head) => &head,
RequestHeadType::Rc(head, _) => head.as_ref(),
}
}
}
impl From<RequestHead> for RequestHeadType {
fn from(head: RequestHead) -> Self {
RequestHeadType::Owned(head)
}
}
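
The diff extends the `Flags` bitflags set with `EXPECT` and `CAMEL_CASE` bits and toggles them from methods such as `set_camel_case_headers`. A small sketch of how that flag set behaves (assumes the bitflags 1.x crate; the `u8` backing type is an assumption):

```rust
use bitflags::bitflags;

bitflags! {
    struct Flags: u8 {
        const CLOSE       = 0b0000_0001;
        const KEEP_ALIVE  = 0b0000_0010;
        const UPGRADE     = 0b0000_0100;
        const EXPECT      = 0b0000_1000;
        const NO_CHUNKING = 0b0001_0000;
        const CAMEL_CASE  = 0b0010_0000;
    }
}

fn main() {
    let mut flags = Flags::empty();

    // Mirrors RequestHead::set_camel_case_headers(true) above.
    flags.insert(Flags::CAMEL_CASE);
    assert!(flags.contains(Flags::CAMEL_CASE));
    assert!(!flags.contains(Flags::EXPECT));

    // ...and set_camel_case_headers(false).
    flags.remove(Flags::CAMEL_CASE);
    assert!(flags.is_empty());
}
```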
#[derive(Debug)]
@ -157,41 +221,29 @@ pub struct ResponseHead {
flags: Flags,
}
impl Default for ResponseHead {
fn default() -> ResponseHead {
impl ResponseHead {
/// Create new instance of `ResponseHead` type
#[inline]
pub fn new(status: StatusCode) -> ResponseHead {
ResponseHead {
status,
version: Version::default(),
status: StatusCode::OK,
headers: HeaderMap::with_capacity(16),
headers: HeaderMap::with_capacity(12),
reason: None,
flags: Flags::empty(),
extensions: RefCell::new(Extensions::new()),
}
}
}
impl Head for ResponseHead {
fn clear(&mut self) {
self.reason = None;
self.flags = Flags::empty();
self.headers.clear();
}
fn pool() -> &'static MessagePool<Self> {
RESPONSE_POOL.with(|p| *p)
}
}
impl ResponseHead {
/// Message extensions
#[inline]
pub fn extensions(&self) -> Ref<Extensions> {
pub fn extensions(&self) -> Ref<'_, Extensions> {
self.extensions.borrow()
}
/// Mutable reference to the message's extensions
#[inline]
pub fn extensions_mut(&self) -> RefMut<Extensions> {
pub fn extensions_mut(&self) -> RefMut<'_, Extensions> {
self.extensions.borrow_mut()
}
@ -288,7 +340,6 @@ impl ResponseHead {
pub struct Message<T: Head> {
head: Rc<T>,
pool: &'static MessagePool<T>,
}
impl<T: Head> Message<T> {
@ -302,7 +353,6 @@ impl<T: Head> Clone for Message<T> {
fn clone(&self) -> Self {
Message {
head: self.head.clone(),
pool: self.pool,
}
}
}
@ -324,39 +374,79 @@ impl<T: Head> std::ops::DerefMut for Message<T> {
impl<T: Head> Drop for Message<T> {
fn drop(&mut self) {
if Rc::strong_count(&self.head) == 1 {
self.pool.release(self.head.clone());
T::pool().release(self.head.clone());
}
}
}
pub(crate) struct BoxedResponseHead {
head: Option<Box<ResponseHead>>,
}
impl BoxedResponseHead {
/// Get new message from the pool of objects
pub fn new(status: StatusCode) -> Self {
RESPONSE_POOL.with(|p| p.get_message(status))
}
pub(crate) fn take(&mut self) -> Self {
BoxedResponseHead {
head: self.head.take(),
}
}
}
impl std::ops::Deref for BoxedResponseHead {
type Target = ResponseHead;
fn deref(&self) -> &Self::Target {
self.head.as_ref().unwrap()
}
}
impl std::ops::DerefMut for BoxedResponseHead {
fn deref_mut(&mut self) -> &mut Self::Target {
self.head.as_mut().unwrap()
}
}
impl Drop for BoxedResponseHead {
fn drop(&mut self) {
if let Some(head) = self.head.take() {
RESPONSE_POOL.with(move |p| p.release(head))
}
}
}
#[doc(hidden)]
/// Request's objects pool
pub struct MessagePool<T: Head>(RefCell<VecDeque<Rc<T>>>);
pub struct MessagePool<T: Head>(RefCell<Vec<Rc<T>>>);
#[doc(hidden)]
#[allow(clippy::vec_box)]
/// Response's objects pool
pub struct BoxedResponsePool(RefCell<Vec<Box<ResponseHead>>>);
thread_local!(static REQUEST_POOL: &'static MessagePool<RequestHead> = MessagePool::<RequestHead>::create());
thread_local!(static RESPONSE_POOL: &'static MessagePool<ResponseHead> = MessagePool::<ResponseHead>::create());
thread_local!(static RESPONSE_POOL: &'static BoxedResponsePool = BoxedResponsePool::create());
impl<T: Head> MessagePool<T> {
fn create() -> &'static MessagePool<T> {
let pool = MessagePool(RefCell::new(VecDeque::with_capacity(128)));
let pool = MessagePool(RefCell::new(Vec::with_capacity(128)));
Box::leak(Box::new(pool))
}
/// Get message from the pool
#[inline]
fn get_message(&'static self) -> Message<T> {
if let Some(mut msg) = self.0.borrow_mut().pop_front() {
if let Some(mut msg) = self.0.borrow_mut().pop() {
if let Some(r) = Rc::get_mut(&mut msg) {
r.clear();
}
Message {
head: msg,
pool: self,
}
Message { head: msg }
} else {
Message {
head: Rc::new(T::default()),
pool: self,
}
}
}
@ -366,7 +456,40 @@ impl<T: Head> MessagePool<T> {
fn release(&self, msg: Rc<T>) {
let v = &mut self.0.borrow_mut();
if v.len() < 128 {
v.push_front(msg);
v.push(msg);
}
}
}
impl BoxedResponsePool {
fn create() -> &'static BoxedResponsePool {
let pool = BoxedResponsePool(RefCell::new(Vec::with_capacity(128)));
Box::leak(Box::new(pool))
}
/// Get message from the pool
#[inline]
fn get_message(&'static self, status: StatusCode) -> BoxedResponseHead {
if let Some(mut head) = self.0.borrow_mut().pop() {
head.reason = None;
head.status = status;
head.headers.clear();
head.flags = Flags::empty();
BoxedResponseHead { head: Some(head) }
} else {
BoxedResponseHead {
head: Some(Box::alloc().init(ResponseHead::new(status))),
}
}
}
#[inline]
/// Release request instance
fn release(&self, msg: Box<ResponseHead>) {
let v = &mut self.0.borrow_mut();
if v.len() < 128 {
msg.extensions.borrow_mut().clear();
v.push(msg);
}
}
}
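
`MessagePool` and the new `BoxedResponsePool` both follow the same thread-local, capacity-capped object pool pattern. A generic sketch of that pattern (the `Pool<String>` instantiation is illustrative; the cap of 128 mirrors the diff):

```rust
// Generic thread-local object pool sketch, not the actix-http types themselves.
use std::cell::RefCell;

struct Pool<T>(RefCell<Vec<Box<T>>>);

impl<T: Default> Pool<T> {
    fn create() -> &'static Pool<T> {
        // Leak once per thread so the pool can live in a thread_local static.
        Box::leak(Box::new(Pool(RefCell::new(Vec::with_capacity(128)))))
    }

    /// Reuse a pooled allocation if one is available, otherwise allocate.
    fn get(&'static self) -> Box<T> {
        self.0
            .borrow_mut()
            .pop()
            .unwrap_or_else(|| Box::new(T::default()))
    }

    /// Return an allocation to the pool, capping its size so it cannot grow unbounded.
    fn release(&self, value: Box<T>) {
        let mut pool = self.0.borrow_mut();
        if pool.len() < 128 {
            pool.push(value);
        }
    }
}

thread_local!(static POOL: &'static Pool<String> = Pool::create());

fn main() {
    let s = POOL.with(|p| p.get());
    POOL.with(|p| p.release(s));
    // This get() reuses the allocation released above.
    let _reused = POOL.with(|p| p.get());
}
```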


@ -1,11 +1,14 @@
use std::pin::Pin;
use std::task::{Context, Poll};
use bytes::Bytes;
use futures::{Async, Poll, Stream};
use futures_core::Stream;
use h2::RecvStream;
use crate::error::PayloadError;
/// Type represent boxed payload
pub type PayloadStream = Box<dyn Stream<Item = Bytes, Error = PayloadError>>;
pub type PayloadStream = Pin<Box<dyn Stream<Item = Result<Bytes, PayloadError>>>>;
/// Type represent streaming payload
pub enum Payload<S = PayloadStream> {
@ -48,17 +51,20 @@ impl<S> Payload<S> {
impl<S> Stream for Payload<S>
where
S: Stream<Item = Bytes, Error = PayloadError>,
S: Stream<Item = Result<Bytes, PayloadError>> + Unpin,
{
type Item = Bytes;
type Error = PayloadError;
type Item = Result<Bytes, PayloadError>;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
match self {
Payload::None => Ok(Async::Ready(None)),
Payload::H1(ref mut pl) => pl.poll(),
Payload::H2(ref mut pl) => pl.poll(),
Payload::Stream(ref mut pl) => pl.poll(),
#[inline]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Self::Item>> {
match self.get_mut() {
Payload::None => Poll::Ready(None),
Payload::H1(ref mut pl) => pl.readany(cx),
Payload::H2(ref mut pl) => Pin::new(pl).poll_next(cx),
Payload::Stream(ref mut pl) => Pin::new(pl).poll_next(cx),
}
}
}
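
With this change `Payload` is a `Stream<Item = Result<Bytes, PayloadError>>` driven by `poll_next` rather than a futures 0.1 stream. A hedged sketch of how a caller drains such a stream (the `futures-util` helpers, the error type, and the runtime attribute are assumptions, not part of the diff):

```rust
// Draining a Stream<Item = Result<Bytes, E>> body chunk by chunk.
use bytes::{Bytes, BytesMut};
use futures_util::stream::{self, StreamExt};

#[actix_rt::main] // any async executor would do here
async fn main() {
    let mut payload = stream::iter(vec![
        Ok::<_, std::io::Error>(Bytes::from_static(b"hello ")),
        Ok(Bytes::from_static(b"world")),
    ]);

    let mut body = BytesMut::new();
    while let Some(chunk) = payload.next().await {
        body.extend_from_slice(&chunk.unwrap());
    }

    assert_eq!(&body[..], &b"hello world"[..]);
}
```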


@ -1,9 +1,10 @@
use std::cell::{Ref, RefMut};
use std::fmt;
use std::{fmt, net};
use http::{header, HeaderMap, Method, Uri, Version};
use http::{header, Method, Uri, Version};
use crate::extensions::Extensions;
use crate::header::HeaderMap;
use crate::httpmessage::HttpMessage;
use crate::message::{Message, RequestHead};
use crate::payload::{Payload, PayloadStream};
@ -24,13 +25,13 @@ impl<P> HttpMessage for Request<P> {
/// Request extensions
#[inline]
fn extensions(&self) -> Ref<Extensions> {
fn extensions(&self) -> Ref<'_, Extensions> {
self.head.extensions()
}
/// Mutable reference to a the request's extensions
#[inline]
fn extensions_mut(&self) -> RefMut<Extensions> {
fn extensions_mut(&self) -> RefMut<'_, Extensions> {
self.head.extensions_mut()
}
@ -79,6 +80,11 @@ impl<P> Request<P> {
)
}
/// Get request's payload
pub fn payload(&mut self) -> &mut Payload<P> {
&mut self.payload
}
/// Take request's payload
pub fn take_payload(&mut self) -> Payload<P> {
std::mem::replace(&mut self.payload, Payload::None)
@ -138,6 +144,7 @@ impl<P> Request<P> {
}
/// Check if request requires connection upgrade
#[inline]
pub fn upgrade(&self) -> bool {
if let Some(conn) = self.head().headers.get(header::CONNECTION) {
if let Ok(s) = conn.to_str() {
@ -146,10 +153,19 @@ impl<P> Request<P> {
}
self.head().method == Method::CONNECT
}
/// Peer socket address
///
/// Peer address is the actual socket address. If a proxy is used in front of
/// the actix http server, then the peer address will be the address of this proxy.
#[inline]
pub fn peer_addr(&self) -> Option<net::SocketAddr> {
self.head().peer_addr
}
}
impl<P> fmt::Debug for Request<P> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
writeln!(
f,
"\nRequest {:?} {}:{}",
@ -161,9 +177,33 @@ impl<P> fmt::Debug for Request<P> {
writeln!(f, " query: ?{:?}", q)?;
}
writeln!(f, " headers:")?;
for (key, val) in self.headers().iter() {
for (key, val) in self.headers() {
writeln!(f, " {:?}: {:?}", key, val)?;
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::convert::TryFrom;
#[test]
fn test_basics() {
let msg = Message::new();
let mut req = Request::from(msg);
req.headers_mut().insert(
header::CONTENT_TYPE,
header::HeaderValue::from_static("text/plain"),
);
assert!(req.headers().contains_key(header::CONTENT_TYPE));
*req.uri_mut() = Uri::try_from("/index.html?q=1").unwrap();
assert_eq!(req.uri().path(), "/index.html");
assert_eq!(req.uri().query(), Some("q=1"));
let s = format!("{:?}", req);
assert!(s.contains("Request HTTP/1.1 GET:/index.html"));
}
}


@ -1,27 +1,27 @@
//! Http response
use std::cell::{Ref, RefMut};
use std::io::Write;
use std::convert::TryFrom;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, str};
use bytes::{BufMut, Bytes, BytesMut};
#[cfg(feature = "cookies")]
use cookie::{Cookie, CookieJar};
use futures::future::{ok, FutureResult, IntoFuture};
use futures::Stream;
use http::header::{self, HeaderName, HeaderValue};
use http::{Error as HttpError, HeaderMap, HttpTryFrom, StatusCode};
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use serde::Serialize;
use serde_json;
use crate::body::{Body, BodyStream, MessageBody, ResponseBody};
use crate::cookie::{Cookie, CookieJar};
use crate::error::Error;
use crate::extensions::Extensions;
use crate::header::{Header, IntoHeaderValue};
use crate::message::{ConnectionType, Message, ResponseHead};
use crate::http::header::{self, HeaderName, HeaderValue};
use crate::http::{Error as HttpError, HeaderMap, StatusCode};
use crate::message::{BoxedResponseHead, ConnectionType, ResponseHead};
/// An HTTP Response
pub struct Response<B = Body> {
head: Message<ResponseHead>,
head: BoxedResponseHead,
body: ResponseBody<B>,
error: Option<Error>,
}
@ -42,11 +42,8 @@ impl Response<Body> {
/// Constructs a response
#[inline]
pub fn new(status: StatusCode) -> Response {
let mut head: Message<ResponseHead> = Message::new();
head.status = status;
Response {
head,
head: BoxedResponseHead::new(status),
body: ResponseBody::Body(Body::Empty),
error: None,
}
@ -56,6 +53,9 @@ impl Response<Body> {
#[inline]
pub fn from_error(error: Error) -> Response {
let mut resp = error.as_response_error().error_response();
if resp.head.status == StatusCode::INTERNAL_SERVER_ERROR {
error!("Internal Server Error: {:?}", error);
}
resp.error = Some(error);
resp
}
@ -75,6 +75,16 @@ impl Response<Body> {
}
impl<B> Response<B> {
/// Constructs a response with body
#[inline]
pub fn with_body(status: StatusCode, body: B) -> Response<B> {
Response {
head: BoxedResponseHead::new(status),
body: ResponseBody::Body(body),
error: None,
}
}
#[inline]
/// Http message part of the response
pub fn head(&self) -> &ResponseHead {
@ -87,18 +97,6 @@ impl<B> Response<B> {
&mut *self.head
}
/// Constructs a response with body
#[inline]
pub fn with_body(status: StatusCode, body: B) -> Response<B> {
let mut head: Message<ResponseHead> = Message::new();
head.status = status;
Response {
head,
body: ResponseBody::Body(body),
error: None,
}
}
/// The source `error` for this response
#[inline]
pub fn error(&self) -> Option<&Error> {
@ -131,17 +129,15 @@ impl<B> Response<B> {
/// Get an iterator for the cookies set by this response
#[inline]
#[cfg(feature = "cookies")]
pub fn cookies(&self) -> CookieIter {
pub fn cookies(&self) -> CookieIter<'_> {
CookieIter {
iter: self.head.headers.get_all(header::SET_COOKIE).iter(),
iter: self.head.headers.get_all(header::SET_COOKIE),
}
}
/// Add a cookie to this response
#[inline]
#[cfg(feature = "cookies")]
pub fn add_cookie(&mut self, cookie: &Cookie) -> Result<(), HttpError> {
pub fn add_cookie(&mut self, cookie: &Cookie<'_>) -> Result<(), HttpError> {
let h = &mut self.head.headers;
HeaderValue::from_str(&cookie.to_string())
.map(|c| {
@ -153,12 +149,10 @@ impl<B> Response<B> {
/// Remove all cookies with the given name from this response. Returns
/// the number of cookies removed.
#[inline]
#[cfg(feature = "cookies")]
pub fn del_cookie(&mut self, name: &str) -> usize {
let h = &mut self.head.headers;
let vals: Vec<HeaderValue> = h
.get_all(header::SET_COOKIE)
.iter()
.map(|v| v.to_owned())
.collect();
h.remove(header::SET_COOKIE);
@ -189,14 +183,26 @@ impl<B> Response<B> {
self.head.keep_alive()
}
/// Get body os this response
/// Responses extensions
#[inline]
pub fn extensions(&self) -> Ref<'_, Extensions> {
self.head.extensions.borrow()
}
/// Mutable reference to the response's extensions
#[inline]
pub fn extensions_mut(&mut self) -> RefMut<'_, Extensions> {
self.head.extensions.borrow_mut()
}
/// Get body of this response
#[inline]
pub fn body(&self) -> &ResponseBody<B> {
&self.body
}
/// Set a body
pub(crate) fn set_body<B2>(self, body: B2) -> Response<B2> {
pub fn set_body<B2>(self, body: B2) -> Response<B2> {
Response {
head: self.head,
body: ResponseBody::Body(body),
@ -204,8 +210,20 @@ impl<B> Response<B> {
}
}
/// Split response and body
pub fn into_parts(self) -> (Response<()>, ResponseBody<B>) {
(
Response {
head: self.head,
body: ResponseBody::Body(()),
error: self.error,
},
self.body,
)
}
/// Drop request's body
pub(crate) fn drop_body(self) -> Response<()> {
pub fn drop_body(self) -> Response<()> {
Response {
head: self.head,
body: ResponseBody::Body(()),
@ -233,8 +251,8 @@ impl<B> Response<B> {
let body = f(&mut self.head, self.body);
Response {
body,
head: self.head,
body: body,
error: self.error,
}
}
@ -246,7 +264,7 @@ impl<B> Response<B> {
}
impl<B: MessageBody> fmt::Debug for Response<B> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let res = writeln!(
f,
"\nResponse {:?} {}{}",
@ -258,27 +276,27 @@ impl<B: MessageBody> fmt::Debug for Response<B> {
for (key, val) in self.head.headers.iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val);
}
let _ = writeln!(f, " body: {:?}", self.body.length());
let _ = writeln!(f, " body: {:?}", self.body.size());
res
}
}
impl IntoFuture for Response {
type Item = Response;
type Error = Error;
type Future = FutureResult<Response, Error>;
impl Future for Response {
type Output = Result<Response, Error>;
fn into_future(self) -> Self::Future {
ok(self)
fn poll(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(Ok(Response {
head: self.head.take(),
body: self.body.take_body(),
error: self.error.take(),
}))
}
}
#[cfg(feature = "cookies")]
pub struct CookieIter<'a> {
iter: header::ValueIter<'a, HeaderValue>,
iter: header::GetAll<'a>,
}
#[cfg(feature = "cookies")]
impl<'a> Iterator for CookieIter<'a> {
type Item = Cookie<'a>;
@ -298,22 +316,18 @@ impl<'a> Iterator for CookieIter<'a> {
/// This type can be used to construct an instance of `Response` through a
/// builder-like pattern.
pub struct ResponseBuilder {
head: Option<Message<ResponseHead>>,
head: Option<BoxedResponseHead>,
err: Option<HttpError>,
#[cfg(feature = "cookies")]
cookies: Option<CookieJar>,
}
impl ResponseBuilder {
#[inline]
/// Create response builder
pub fn new(status: StatusCode) -> Self {
let mut head: Message<ResponseHead> = Message::new();
head.status = status;
ResponseBuilder {
head: Some(head),
head: Some(BoxedResponseHead::new(status)),
err: None,
#[cfg(feature = "cookies")]
cookies: None,
}
}
@ -329,18 +343,16 @@ impl ResponseBuilder {
/// Set a header.
///
/// ```rust,ignore
/// # extern crate actix_web;
/// use actix_web::{http, Request, Response, Result};
/// ```rust
/// use actix_http::{http, Request, Response, Result};
///
/// fn index(req: HttpRequest) -> Result<Response> {
/// fn index(req: Request) -> Result<Response> {
/// Ok(Response::Ok()
/// .set(http::header::IfModifiedSince(
/// "Sun, 07 Nov 1994 08:48:37 GMT".parse()?,
/// ))
/// .finish())
/// }
/// fn main() {}
/// ```
#[doc(hidden)]
pub fn set<H: Header>(&mut self, hdr: H) -> &mut Self {
@ -357,21 +369,20 @@ impl ResponseBuilder {
/// Append a header to existing headers.
///
/// ```rust,ignore
/// # extern crate actix_web;
/// use actix_web::{http, Request, Response};
/// ```rust
/// use actix_http::{http, Request, Response};
///
/// fn index(req: HttpRequest) -> Response {
/// fn index(req: Request) -> Response {
/// Response::Ok()
/// .header("X-TEST", "value")
/// .header(http::header::CONTENT_TYPE, "application/json")
/// .finish()
/// }
/// fn main() {}
/// ```
pub fn header<K, V>(&mut self, key: K, value: V) -> &mut Self
where
HeaderName: HttpTryFrom<K>,
HeaderName: TryFrom<K>,
<HeaderName as TryFrom<K>>::Error: Into<HttpError>,
V: IntoHeaderValue,
{
if let Some(parts) = parts(&mut self.head, &self.err) {
@ -390,21 +401,20 @@ impl ResponseBuilder {
/// Set a header.
///
/// ```rust,ignore
/// # extern crate actix_web;
/// use actix_web::{http, Request, Response};
/// ```rust
/// use actix_http::{http, Request, Response};
///
/// fn index(req: HttpRequest) -> Response {
/// fn index(req: Request) -> Response {
/// Response::Ok()
/// .set_header("X-TEST", "value")
/// .set_header(http::header::CONTENT_TYPE, "application/json")
/// .finish()
/// }
/// fn main() {}
/// ```
pub fn set_header<K, V>(&mut self, key: K, value: V) -> &mut Self
where
HeaderName: HttpTryFrom<K>,
HeaderName: TryFrom<K>,
<HeaderName as TryFrom<K>>::Error: Into<HttpError>,
V: IntoHeaderValue,
{
if let Some(parts) = parts(&mut self.head, &self.err) {
@ -462,7 +472,9 @@ impl ResponseBuilder {
/// Disable chunked transfer encoding for HTTP/1.1 streaming responses.
#[inline]
pub fn no_chunking(&mut self) -> &mut Self {
pub fn no_chunking(&mut self, len: u64) -> &mut Self {
self.header(header::CONTENT_LENGTH, len);
if let Some(parts) = parts(&mut self.head, &self.err) {
parts.no_chunking(true);
}
@ -473,7 +485,8 @@ impl ResponseBuilder {
#[inline]
pub fn content_type<V>(&mut self, value: V) -> &mut Self
where
HeaderValue: HttpTryFrom<V>,
HeaderValue: TryFrom<V>,
<HeaderValue as TryFrom<V>>::Error: Into<HttpError>,
{
if let Some(parts) = parts(&mut self.head, &self.err) {
match HeaderValue::try_from(value) {
@ -486,21 +499,12 @@ impl ResponseBuilder {
self
}
/// Set content length
#[inline]
pub fn content_length(&mut self, len: u64) -> &mut Self {
let mut wrt = BytesMut::new().writer();
let _ = write!(wrt, "{}", len);
self.header(header::CONTENT_LENGTH, wrt.get_mut().take().freeze())
}
/// Set a cookie
///
/// ```rust,ignore
/// # extern crate actix_web;
/// use actix_web::{http, HttpRequest, Response, Result};
/// ```rust
/// use actix_http::{http, Request, Response};
///
/// fn index(req: HttpRequest) -> Response {
/// fn index(req: Request) -> Response {
/// Response::Ok()
/// .cookie(
/// http::Cookie::build("name", "value")
@ -513,7 +517,6 @@ impl ResponseBuilder {
/// .finish()
/// }
/// ```
#[cfg(feature = "cookies")]
pub fn cookie<'c>(&mut self, cookie: Cookie<'c>) -> &mut Self {
if self.cookies.is_none() {
let mut jar = CookieJar::new();
@ -527,11 +530,10 @@ impl ResponseBuilder {
/// Remove cookie
///
/// ```rust,ignore
/// # extern crate actix_web;
/// use actix_web::{http, HttpRequest, Response, Result};
/// ```rust
/// use actix_http::{http, Request, Response, HttpMessage};
///
/// fn index(req: &HttpRequest) -> Response {
/// fn index(req: Request) -> Response {
/// let mut builder = Response::Ok();
///
/// if let Some(ref cookie) = req.cookie("name") {
@ -541,17 +543,14 @@ impl ResponseBuilder {
/// builder.finish()
/// }
/// ```
#[cfg(feature = "cookies")]
pub fn del_cookie<'a>(&mut self, cookie: &Cookie<'a>) -> &mut Self {
{
if self.cookies.is_none() {
self.cookies = Some(CookieJar::new())
}
let jar = self.cookies.as_mut().unwrap();
let cookie = cookie.clone().into_owned();
jar.add_original(cookie.clone());
jar.remove(cookie);
if self.cookies.is_none() {
self.cookies = Some(CookieJar::new())
}
let jar = self.cookies.as_mut().unwrap();
let cookie = cookie.clone().into_owned();
jar.add_original(cookie.clone());
jar.remove(cookie);
self
}
@ -581,18 +580,19 @@ impl ResponseBuilder {
/// Responses extensions
#[inline]
pub fn extensions(&self) -> Ref<Extensions> {
pub fn extensions(&self) -> Ref<'_, Extensions> {
let head = self.head.as_ref().expect("cannot reuse response builder");
head.extensions.borrow()
}
/// Mutable reference to the response's extensions
#[inline]
pub fn extensions_mut(&mut self) -> RefMut<Extensions> {
pub fn extensions_mut(&mut self) -> RefMut<'_, Extensions> {
let head = self.head.as_ref().expect("cannot reuse response builder");
head.extensions.borrow_mut()
}
#[inline]
/// Set a body and generate `Response`.
///
/// `ResponseBuilder` can not be used after this call.
@ -608,20 +608,14 @@ impl ResponseBuilder {
return Response::from(Error::from(e)).into_body();
}
#[allow(unused_mut)]
let mut response = self.head.take().expect("cannot reuse response builder");
#[cfg(feature = "cookies")]
{
if let Some(ref jar) = self.cookies {
for cookie in jar.delta() {
match HeaderValue::from_str(&cookie.to_string()) {
Ok(val) => {
let _ = response.headers.append(header::SET_COOKIE, val);
}
Err(e) => return Response::from(Error::from(e)).into_body(),
};
}
if let Some(ref jar) = self.cookies {
for cookie in jar.delta() {
match HeaderValue::from_str(&cookie.to_string()) {
Ok(val) => response.headers.append(header::SET_COOKIE, val),
Err(e) => return Response::from(Error::from(e)).into_body(),
};
}
}
@ -638,12 +632,13 @@ impl ResponseBuilder {
/// `ResponseBuilder` can not be used after this call.
pub fn streaming<S, E>(&mut self, stream: S) -> Response
where
S: Stream<Item = Bytes, Error = E> + 'static,
S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
E: Into<Error> + 'static,
{
self.body(Body::from_message(BodyStream::new(stream)))
}
#[inline]
/// Set a json body and generate `Response`
///
/// `ResponseBuilder` can not be used after this call.
@ -685,7 +680,6 @@ impl ResponseBuilder {
ResponseBuilder {
head: self.head.take(),
err: self.err.take(),
#[cfg(feature = "cookies")]
cookies: self.cookies.take(),
}
}
@ -693,22 +687,20 @@ impl ResponseBuilder {
#[inline]
fn parts<'a>(
parts: &'a mut Option<Message<ResponseHead>>,
parts: &'a mut Option<BoxedResponseHead>,
err: &Option<HttpError>,
) -> Option<&'a mut Message<ResponseHead>> {
) -> Option<&'a mut ResponseHead> {
if err.is_some() {
return None;
}
parts.as_mut()
parts.as_mut().map(|r| &mut **r)
}
/// Convert `Response` to a `ResponseBuilder`. The body is dropped.
impl<B> From<Response<B>> for ResponseBuilder {
fn from(res: Response<B>) -> ResponseBuilder {
// If this response has cookies, load them into a jar
#[cfg(feature = "cookies")]
let mut jar: Option<CookieJar> = None;
#[cfg(feature = "cookies")]
for c in res.cookies() {
if let Some(ref mut j) = jar {
j.add_original(c.into_owned());
@ -722,7 +714,6 @@ impl<B> From<Response<B>> for ResponseBuilder {
ResponseBuilder {
head: Some(res.head),
err: None,
#[cfg(feature = "cookies")]
cookies: jar,
}
}
@ -732,48 +723,61 @@ impl<B> From<Response<B>> for ResponseBuilder {
impl<'a> From<&'a ResponseHead> for ResponseBuilder {
fn from(head: &'a ResponseHead) -> ResponseBuilder {
// If this response has cookies, load them into a jar
#[cfg(feature = "cookies")]
let mut jar: Option<CookieJar> = None;
#[cfg(feature = "cookies")]
{
let cookies = CookieIter {
iter: head.headers.get_all(header::SET_COOKIE).iter(),
};
for c in cookies {
if let Some(ref mut j) = jar {
j.add_original(c.into_owned());
} else {
let mut j = CookieJar::new();
j.add_original(c.into_owned());
jar = Some(j);
}
let cookies = CookieIter {
iter: head.headers.get_all(header::SET_COOKIE),
};
for c in cookies {
if let Some(ref mut j) = jar {
j.add_original(c.into_owned());
} else {
let mut j = CookieJar::new();
j.add_original(c.into_owned());
jar = Some(j);
}
}
let mut msg: Message<ResponseHead> = Message::new();
let mut msg = BoxedResponseHead::new(head.status);
msg.version = head.version;
msg.status = head.status;
msg.reason = head.reason;
msg.headers = head.headers.clone();
for (k, v) in &head.headers {
msg.headers.append(k.clone(), v.clone());
}
msg.no_chunking(!head.chunked());
ResponseBuilder {
head: Some(msg),
err: None,
#[cfg(feature = "cookies")]
cookies: jar,
}
}
}
impl IntoFuture for ResponseBuilder {
type Item = Response;
type Error = Error;
type Future = FutureResult<Response, Error>;
impl Future for ResponseBuilder {
type Output = Result<Response, Error>;
fn into_future(mut self) -> Self::Future {
ok(self.finish())
fn poll(mut self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(Ok(self.finish()))
}
}
impl fmt::Debug for ResponseBuilder {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let head = self.head.as_ref().unwrap();
let res = writeln!(
f,
"\nResponseBuilder {:?} {}{}",
head.version,
head.status,
head.reason.unwrap_or(""),
);
let _ = writeln!(f, " headers:");
for (key, val) in head.headers.iter() {
let _ = writeln!(f, " {:?}: {:?}", key, val);
}
res
}
}
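
The builder now stores a `BoxedResponseHead` and implements `Future` and `Debug` directly. A usage sketch of the builder API as shown in this file (assumes the actix-http crate exactly as modified in this diff):

```rust
use actix_http::{http, Response};

fn main() {
    let resp = Response::Ok()
        .header("X-TEST", "value")
        .header(http::header::CONTENT_TYPE, "application/json")
        .body("{}");

    assert_eq!(resp.status(), http::StatusCode::OK);
    assert!(resp.headers().get(http::header::CONTENT_TYPE).is_some());
}
```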
@ -845,7 +849,7 @@ impl From<BytesMut> for Response {
mod tests {
use super::*;
use crate::body::Body;
use crate::http::header::{HeaderValue, CONTENT_TYPE, COOKIE};
use crate::http::header::{HeaderValue, CONTENT_TYPE, COOKIE, SET_COOKIE};
#[test]
fn test_debug() {
@ -858,7 +862,6 @@ mod tests {
}
#[test]
#[cfg(feature = "cookies")]
fn test_response_cookies() {
use crate::httpmessage::HttpMessage;
@ -877,13 +880,12 @@ mod tests {
.max_age(time::Duration::days(1))
.finish(),
)
.del_cookie(&cookies[0])
.del_cookie(&cookies[1])
.finish();
let mut val: Vec<_> = resp
.headers()
.get_all("Set-Cookie")
.iter()
.get_all(SET_COOKIE)
.map(|v| v.to_str().unwrap().to_owned())
.collect();
val.sort();
@ -895,7 +897,6 @@ mod tests {
}
#[test]
#[cfg(feature = "cookies")]
fn test_update_response_cookies() {
let mut r = Response::Ok()
.cookie(crate::http::Cookie::new("original", "val100"))
@ -913,9 +914,9 @@ mod tests {
let mut iter = r.cookies();
let v = iter.next().unwrap();
assert_eq!((v.name(), v.value()), ("original", "val100"));
let v = iter.next().unwrap();
assert_eq!((v.name(), v.value()), ("cookie3", "val300"));
let v = iter.next().unwrap();
assert_eq!((v.name(), v.value()), ("original", "val100"));
}
#[test]
@ -986,6 +987,14 @@ mod tests {
assert_eq!(resp.body().get_ref(), b"[\"v1\",\"v2\",\"v3\"]");
}
#[test]
fn test_serde_json_in_body() {
use serde_json::json;
let resp =
Response::build(StatusCode::OK).body(json!({"test-key":"test-value"}));
assert_eq!(resp.body().get_ref(), br#"{"test-key":"test-value"}"#);
}
#[test]
fn test_into_response() {
let resp: Response = "test".into();
@ -1051,12 +1060,12 @@ mod tests {
resp.headers().get(CONTENT_TYPE).unwrap(),
HeaderValue::from_static("application/octet-stream")
);
assert_eq!(resp.status(), StatusCode::OK);
assert_eq!(resp.body().get_ref(), b"test");
}
#[test]
#[cfg(feature = "cookies")]
fn test_into_builder() {
let mut resp: Response = "test".into();
assert_eq!(resp.status(), StatusCode::OK);

actix-http/src/service.rs (new file)

@ -0,0 +1,681 @@
use std::marker::PhantomData;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, net, rc};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_rt::net::TcpStream;
use actix_service::{pipeline_factory, IntoServiceFactory, Service, ServiceFactory};
use bytes::Bytes;
use futures_core::{ready, Future};
use futures_util::future::ok;
use h2::server::{self, Handshake};
use pin_project::pin_project;
use crate::body::MessageBody;
use crate::builder::HttpServiceBuilder;
use crate::cloneable::CloneableService;
use crate::config::{KeepAlive, ServiceConfig};
use crate::error::{DispatchError, Error};
use crate::helpers::DataFactory;
use crate::request::Request;
use crate::response::Response;
use crate::{h1, h2::Dispatcher, Protocol};
/// `ServiceFactory` HTTP1.1/HTTP2 transport implementation
pub struct HttpService<T, S, B, X = h1::ExpectHandler, U = h1::UpgradeHandler<T>> {
srv: S,
cfg: ServiceConfig,
expect: X,
upgrade: Option<U>,
on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
_t: PhantomData<(T, B)>,
}
impl<T, S, B> HttpService<T, S, B>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
/// Create builder for `HttpService` instance.
pub fn build() -> HttpServiceBuilder<T, S> {
HttpServiceBuilder::new()
}
}
impl<T, S, B> HttpService<T, S, B>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
{
/// Create new `HttpService` instance.
pub fn new<F: IntoServiceFactory<S>>(service: F) -> Self {
let cfg = ServiceConfig::new(KeepAlive::Timeout(5), 5000, 0, false, None);
HttpService {
cfg,
srv: service.into_factory(),
expect: h1::ExpectHandler,
upgrade: None,
on_connect: None,
_t: PhantomData,
}
}
/// Create new `HttpService` instance with config.
pub(crate) fn with_config<F: IntoServiceFactory<S>>(
cfg: ServiceConfig,
service: F,
) -> Self {
HttpService {
cfg,
srv: service.into_factory(),
expect: h1::ExpectHandler,
upgrade: None,
on_connect: None,
_t: PhantomData,
}
}
}
impl<T, S, B, X, U> HttpService<T, S, B, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody,
{
/// Provide service for `EXPECT: 100-Continue` support.
///
/// The service is called with requests that contain an `EXPECT` header.
/// On success the service must return the request, which is then
/// forwarded to the main service.
pub fn expect<X1>(self, expect: X1) -> HttpService<T, S, B, X1, U>
where
X1: ServiceFactory<Config = (), Request = Request, Response = Request>,
X1::Error: Into<Error>,
X1::InitError: fmt::Debug,
<X1::Service as Service>::Future: 'static,
{
HttpService {
expect,
cfg: self.cfg,
srv: self.srv,
upgrade: self.upgrade,
on_connect: self.on_connect,
_t: PhantomData,
}
}
/// Provide service for custom `Connection: UPGRADE` support.
///
/// If a service is provided, normal request handling is halted and this
/// service is called with the original request and the framed object.
pub fn upgrade<U1>(self, upgrade: Option<U1>) -> HttpService<T, S, B, X, U1>
where
U1: ServiceFactory<
Config = (),
Request = (Request, Framed<T, h1::Codec>),
Response = (),
>,
U1::Error: fmt::Display,
U1::InitError: fmt::Debug,
<U1::Service as Service>::Future: 'static,
{
HttpService {
upgrade,
cfg: self.cfg,
srv: self.srv,
expect: self.expect,
on_connect: self.on_connect,
_t: PhantomData,
}
}
/// Set on connect callback.
pub(crate) fn on_connect(
mut self,
f: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
) -> Self {
self.on_connect = f;
self
}
}
impl<S, B, X, U> HttpService<TcpStream, S, B, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
<X::Service as Service>::Future: 'static,
U: ServiceFactory<
Config = (),
Request = (Request, Framed<TcpStream, h1::Codec>),
Response = (),
>,
U::Error: fmt::Display + Into<Error>,
U::InitError: fmt::Debug,
<U::Service as Service>::Future: 'static,
{
/// Create simple tcp stream service
pub fn tcp(
self,
) -> impl ServiceFactory<
Config = (),
Request = TcpStream,
Response = (),
Error = DispatchError,
InitError = (),
> {
pipeline_factory(|io: TcpStream| {
let peer_addr = io.peer_addr().ok();
ok((io, Protocol::Http1, peer_addr))
})
.and_then(self)
}
}
#[cfg(feature = "openssl")]
mod openssl {
use super::*;
use actix_tls::openssl::{Acceptor, SslAcceptor, SslStream};
use actix_tls::{openssl::HandshakeError, TlsError};
impl<S, B, X, U> HttpService<SslStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
<X::Service as Service>::Future: 'static,
U: ServiceFactory<
Config = (),
Request = (Request, Framed<SslStream<TcpStream>, h1::Codec>),
Response = (),
>,
U::Error: fmt::Display + Into<Error>,
U::InitError: fmt::Debug,
<U::Service as Service>::Future: 'static,
{
/// Create openssl based service
pub fn openssl(
self,
acceptor: SslAcceptor,
) -> impl ServiceFactory<
Config = (),
Request = TcpStream,
Response = (),
Error = TlsError<HandshakeError<TcpStream>, DispatchError>,
InitError = (),
> {
pipeline_factory(
Acceptor::new(acceptor)
.map_err(TlsError::Tls)
.map_init_err(|_| panic!()),
)
.and_then(|io: SslStream<TcpStream>| {
let proto = if let Some(protos) = io.ssl().selected_alpn_protocol() {
if protos.windows(2).any(|window| window == b"h2") {
Protocol::Http2
} else {
Protocol::Http1
}
} else {
Protocol::Http1
};
let peer_addr = io.get_ref().peer_addr().ok();
ok((io, proto, peer_addr))
})
.and_then(self.map_err(TlsError::Service))
}
}
}
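
Both TLS branches pick the protocol from the negotiated ALPN value by scanning for the `h2` token. That selection logic in isolation (the local `Protocol` enum mirrors the one added to `lib.rs` in this diff; the helper function is illustrative):

```rust
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
enum Protocol {
    Http1,
    Http2,
}

fn select_protocol(alpn: Option<&[u8]>) -> Protocol {
    match alpn {
        // HTTP/2 was negotiated if the ALPN value contains the "h2" token.
        Some(protos) if protos.windows(2).any(|window| window == b"h2") => Protocol::Http2,
        _ => Protocol::Http1,
    }
}

fn main() {
    assert_eq!(select_protocol(Some(&b"h2"[..])), Protocol::Http2);
    assert_eq!(select_protocol(Some(&b"http/1.1"[..])), Protocol::Http1);
    assert_eq!(select_protocol(None), Protocol::Http1);
}
```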
#[cfg(feature = "rustls")]
mod rustls {
use super::*;
use actix_tls::rustls::{Acceptor, ServerConfig, Session, TlsStream};
use actix_tls::TlsError;
use std::io;
impl<S, B, X, U> HttpService<TlsStream<TcpStream>, S, B, X, U>
where
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
<X::Service as Service>::Future: 'static,
U: ServiceFactory<
Config = (),
Request = (Request, Framed<TlsStream<TcpStream>, h1::Codec>),
Response = (),
>,
U::Error: fmt::Display + Into<Error>,
U::InitError: fmt::Debug,
<U::Service as Service>::Future: 'static,
{
/// Create rustls based service
pub fn rustls(
self,
mut config: ServerConfig,
) -> impl ServiceFactory<
Config = (),
Request = TcpStream,
Response = (),
Error = TlsError<io::Error, DispatchError>,
InitError = (),
> {
let protos = vec!["h2".to_string().into(), "http/1.1".to_string().into()];
config.set_protocols(&protos);
pipeline_factory(
Acceptor::new(config)
.map_err(TlsError::Tls)
.map_init_err(|_| panic!()),
)
.and_then(|io: TlsStream<TcpStream>| {
let proto = if let Some(protos) = io.get_ref().1.get_alpn_protocol() {
if protos.windows(2).any(|window| window == b"h2") {
Protocol::Http2
} else {
Protocol::Http1
}
} else {
Protocol::Http1
};
let peer_addr = io.get_ref().0.peer_addr().ok();
ok((io, proto, peer_addr))
})
.and_then(self.map_err(TlsError::Service))
}
}
}
impl<T, S, B, X, U> ServiceFactory for HttpService<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Config = (), Request = Request>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
X: ServiceFactory<Config = (), Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
<X::Service as Service>::Future: 'static,
U: ServiceFactory<
Config = (),
Request = (Request, Framed<T, h1::Codec>),
Response = (),
>,
U::Error: fmt::Display + Into<Error>,
U::InitError: fmt::Debug,
<U::Service as Service>::Future: 'static,
{
type Config = ();
type Request = (T, Protocol, Option<net::SocketAddr>);
type Response = ();
type Error = DispatchError;
type InitError = ();
type Service = HttpServiceHandler<T, S::Service, B, X::Service, U::Service>;
type Future = HttpServiceResponse<T, S, B, X, U>;
fn new_service(&self, _: ()) -> Self::Future {
HttpServiceResponse {
fut: self.srv.new_service(()),
fut_ex: Some(self.expect.new_service(())),
fut_upg: self.upgrade.as_ref().map(|f| f.new_service(())),
expect: None,
upgrade: None,
on_connect: self.on_connect.clone(),
cfg: self.cfg.clone(),
_t: PhantomData,
}
}
}
#[doc(hidden)]
#[pin_project]
pub struct HttpServiceResponse<
T,
S: ServiceFactory,
B,
X: ServiceFactory,
U: ServiceFactory,
> {
#[pin]
fut: S::Future,
#[pin]
fut_ex: Option<X::Future>,
#[pin]
fut_upg: Option<U::Future>,
expect: Option<X::Service>,
upgrade: Option<U::Service>,
on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
cfg: ServiceConfig,
_t: PhantomData<(T, B)>,
}
impl<T, S, B, X, U> Future for HttpServiceResponse<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: ServiceFactory<Request = Request>,
S::Error: Into<Error> + 'static,
S::InitError: fmt::Debug,
S::Response: Into<Response<B>> + 'static,
<S::Service as Service>::Future: 'static,
B: MessageBody + 'static,
X: ServiceFactory<Request = Request, Response = Request>,
X::Error: Into<Error>,
X::InitError: fmt::Debug,
<X::Service as Service>::Future: 'static,
U: ServiceFactory<Request = (Request, Framed<T, h1::Codec>), Response = ()>,
U::Error: fmt::Display,
U::InitError: fmt::Debug,
<U::Service as Service>::Future: 'static,
{
type Output =
Result<HttpServiceHandler<T, S::Service, B, X::Service, U::Service>, ()>;
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
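        // Resolve the expect and upgrade service factories first, then the main service;
        // once all are ready, assemble the per-connection handler.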
let mut this = self.as_mut().project();
if let Some(fut) = this.fut_ex.as_pin_mut() {
let expect = ready!(fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)))?;
this = self.as_mut().project();
*this.expect = Some(expect);
this.fut_ex.set(None);
}
if let Some(fut) = this.fut_upg.as_pin_mut() {
let upgrade = ready!(fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)))?;
this = self.as_mut().project();
*this.upgrade = Some(upgrade);
            this.fut_upg.set(None);
}
let result = ready!(this
.fut
.poll(cx)
.map_err(|e| log::error!("Init http service error: {:?}", e)));
Poll::Ready(result.map(|service| {
let this = self.as_mut().project();
HttpServiceHandler::new(
this.cfg.clone(),
service,
this.expect.take().unwrap(),
this.upgrade.take(),
this.on_connect.clone(),
)
}))
}
}
/// `Service` implementation for HTTP/1 and HTTP/2 transport
pub struct HttpServiceHandler<T, S: Service, B, X: Service, U: Service> {
srv: CloneableService<S>,
expect: CloneableService<X>,
upgrade: Option<CloneableService<U>>,
cfg: ServiceConfig,
on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
_t: PhantomData<(T, B, X)>,
}
impl<T, S, B, X, U> HttpServiceHandler<T, S, B, X, U>
where
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, h1::Codec>), Response = ()>,
U::Error: fmt::Display,
{
fn new(
cfg: ServiceConfig,
srv: S,
expect: X,
upgrade: Option<U>,
on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,
) -> HttpServiceHandler<T, S, B, X, U> {
HttpServiceHandler {
cfg,
on_connect,
srv: CloneableService::new(srv),
expect: CloneableService::new(expect),
upgrade: upgrade.map(CloneableService::new),
_t: PhantomData,
}
}
}
impl<T, S, B, X, U> Service for HttpServiceHandler<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, h1::Codec>), Response = ()>,
U::Error: fmt::Display + Into<Error>,
{
type Request = (T, Protocol, Option<net::SocketAddr>);
type Response = ();
type Error = DispatchError;
type Future = HttpServiceHandlerResponse<T, S, B, X, U>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
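        // The handler is ready only when the expect service, the main service, and the
        // upgrade service (when configured) are all ready.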
let ready = self
.expect
.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready();
let ready = self
.srv
.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready()
&& ready;
let ready = if let Some(ref mut upg) = self.upgrade {
upg.poll_ready(cx)
.map_err(|e| {
let e = e.into();
log::error!("Http service readiness error: {:?}", e);
DispatchError::Service(e)
})?
.is_ready()
&& ready
} else {
ready
};
if ready {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn call(&mut self, (io, proto, peer_addr): Self::Request) -> Self::Future {
let on_connect = if let Some(ref on_connect) = self.on_connect {
Some(on_connect(&io))
} else {
None
};
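        // Route the connection to the HTTP/1 dispatcher or the HTTP/2 handshake
        // depending on the protocol negotiated by the acceptor.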
match proto {
Protocol::Http2 => HttpServiceHandlerResponse {
state: State::H2Handshake(Some((
server::handshake(io),
self.cfg.clone(),
self.srv.clone(),
on_connect,
peer_addr,
))),
},
Protocol::Http1 => HttpServiceHandlerResponse {
state: State::H1(h1::Dispatcher::new(
io,
self.cfg.clone(),
self.srv.clone(),
self.expect.clone(),
self.upgrade.clone(),
on_connect,
peer_addr,
)),
},
}
}
}
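// Per-connection state: a running HTTP/1 dispatcher, a running HTTP/2 dispatcher,
// or an HTTP/2 handshake that has not completed yet.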
#[pin_project(project = StateProj)]
enum State<T, S, B, X, U>
where
S: Service<Request = Request>,
S::Future: 'static,
S::Error: Into<Error>,
T: AsyncRead + AsyncWrite + Unpin,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, h1::Codec>), Response = ()>,
U::Error: fmt::Display,
{
H1(#[pin] h1::Dispatcher<T, S, B, X, U>),
H2(#[pin] Dispatcher<T, S, B>),
H2Handshake(
Option<(
Handshake<T, Bytes>,
ServiceConfig,
CloneableService<S>,
Option<Box<dyn DataFactory>>,
Option<net::SocketAddr>,
)>,
),
}
#[pin_project]
pub struct HttpServiceHandlerResponse<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, h1::Codec>), Response = ()>,
U::Error: fmt::Display,
{
#[pin]
state: State<T, S, B, X, U>,
}
impl<T, S, B, X, U> Future for HttpServiceHandlerResponse<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Future: 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, h1::Codec>), Response = ()>,
U::Error: fmt::Display,
{
type Output = Result<(), DispatchError>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
self.project().state.poll(cx)
}
}
impl<T, S, B, X, U> State<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error> + 'static,
S::Response: Into<Response<B>> + 'static,
B: MessageBody + 'static,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, h1::Codec>), Response = ()>,
U::Error: fmt::Display,
{
fn poll(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), DispatchError>> {
match self.as_mut().project() {
StateProj::H1(disp) => disp.poll(cx),
StateProj::H2(disp) => disp.poll(cx),
StateProj::H2Handshake(ref mut data) => {
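                // Drive the HTTP/2 handshake; once it completes, replace this state
                // with an H2 dispatcher and immediately poll it.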
let conn = if let Some(ref mut item) = data {
match Pin::new(&mut item.0).poll(cx) {
Poll::Ready(Ok(conn)) => conn,
Poll::Ready(Err(err)) => {
trace!("H2 handshake error: {}", err);
return Poll::Ready(Err(err.into()));
}
Poll::Pending => return Poll::Pending,
}
} else {
                    panic!("H2Handshake state polled after completion")
};
let (_, cfg, srv, on_connect, peer_addr) = data.take().unwrap();
self.set(State::H2(Dispatcher::new(
srv, conn, on_connect, cfg, None, peer_addr,
)));
self.poll(cx)
}
}
}
}


@@ -1,5 +0,0 @@
mod senderror;
mod service;
pub use self::senderror::{SendError, SendResponse};
pub use self::service::HttpService;


@@ -1,241 +0,0 @@
use std::marker::PhantomData;
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_service::{NewService, Service};
use futures::future::{ok, Either, FutureResult};
use futures::{Async, Future, Poll, Sink};
use crate::body::{BodySize, MessageBody, ResponseBody};
use crate::error::{Error, ResponseError};
use crate::h1::{Codec, Message};
use crate::response::Response;
pub struct SendError<T, R, E>(PhantomData<(T, R, E)>);
impl<T, R, E> Default for SendError<T, R, E>
where
T: AsyncRead + AsyncWrite,
E: ResponseError,
{
fn default() -> Self {
SendError(PhantomData)
}
}
impl<T, R, E> NewService for SendError<T, R, E>
where
T: AsyncRead + AsyncWrite,
E: ResponseError,
{
type Request = Result<R, (E, Framed<T, Codec>)>;
type Response = R;
type Error = (E, Framed<T, Codec>);
type InitError = ();
type Service = SendError<T, R, E>;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, _: &()) -> Self::Future {
ok(SendError(PhantomData))
}
}
impl<T, R, E> Service for SendError<T, R, E>
where
T: AsyncRead + AsyncWrite,
E: ResponseError,
{
type Request = Result<R, (E, Framed<T, Codec>)>;
type Response = R;
type Error = (E, Framed<T, Codec>);
type Future = Either<FutureResult<R, (E, Framed<T, Codec>)>, SendErrorFut<T, R, E>>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: Result<R, (E, Framed<T, Codec>)>) -> Self::Future {
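        // Pass successful responses through untouched; convert errors into an error
        // response that is written over the framed connection before the error and
        // the transport are handed back to the caller.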
match req {
Ok(r) => Either::A(ok(r)),
Err((e, framed)) => {
let res = e.error_response().set_body(format!("{}", e));
let (res, _body) = res.replace_body(());
Either::B(SendErrorFut {
framed: Some(framed),
res: Some((res, BodySize::Empty).into()),
err: Some(e),
_t: PhantomData,
})
}
}
}
}
pub struct SendErrorFut<T, R, E> {
res: Option<Message<(Response<()>, BodySize)>>,
framed: Option<Framed<T, Codec>>,
err: Option<E>,
_t: PhantomData<R>,
}
impl<T, R, E> Future for SendErrorFut<T, R, E>
where
E: ResponseError,
T: AsyncRead + AsyncWrite,
{
type Item = R;
type Error = (E, Framed<T, Codec>);
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(res) = self.res.take() {
if self.framed.as_mut().unwrap().force_send(res).is_err() {
return Err((self.err.take().unwrap(), self.framed.take().unwrap()));
}
}
match self.framed.as_mut().unwrap().poll_complete() {
Ok(Async::Ready(_)) => {
Err((self.err.take().unwrap(), self.framed.take().unwrap()))
}
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(_) => Err((self.err.take().unwrap(), self.framed.take().unwrap())),
}
}
}
pub struct SendResponse<T, B>(PhantomData<(T, B)>);
impl<T, B> Default for SendResponse<T, B> {
fn default() -> Self {
SendResponse(PhantomData)
}
}
impl<T, B> SendResponse<T, B>
where
T: AsyncRead + AsyncWrite,
B: MessageBody,
{
pub fn send(
framed: Framed<T, Codec>,
res: Response<B>,
) -> impl Future<Item = Framed<T, Codec>, Error = Error> {
// extract body from response
let (res, body) = res.replace_body(());
// write response
SendResponseFut {
res: Some(Message::Item((res, body.length()))),
body: Some(body),
framed: Some(framed),
}
}
}
impl<T, B> NewService for SendResponse<T, B>
where
T: AsyncRead + AsyncWrite,
B: MessageBody,
{
type Request = (Response<B>, Framed<T, Codec>);
type Response = Framed<T, Codec>;
type Error = Error;
type InitError = ();
type Service = SendResponse<T, B>;
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, _: &()) -> Self::Future {
ok(SendResponse(PhantomData))
}
}
impl<T, B> Service for SendResponse<T, B>
where
T: AsyncRead + AsyncWrite,
B: MessageBody,
{
type Request = (Response<B>, Framed<T, Codec>);
type Response = Framed<T, Codec>;
type Error = Error;
type Future = SendResponseFut<T, B>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, (res, framed): (Response<B>, Framed<T, Codec>)) -> Self::Future {
let (res, body) = res.replace_body(());
SendResponseFut {
res: Some(Message::Item((res, body.length()))),
body: Some(body),
framed: Some(framed),
}
}
}
pub struct SendResponseFut<T, B> {
res: Option<Message<(Response<()>, BodySize)>>,
body: Option<ResponseBody<B>>,
framed: Option<Framed<T, Codec>>,
}
impl<T, B> Future for SendResponseFut<T, B>
where
T: AsyncRead + AsyncWrite,
B: MessageBody,
{
type Item = Framed<T, Codec>;
type Error = Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
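        // Write the response head, then stream body chunks, flushing the framed write
        // buffer as it fills, until the whole response has been sent.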
loop {
let mut body_ready = self.body.is_some();
let framed = self.framed.as_mut().unwrap();
// send body
if self.res.is_none() && self.body.is_some() {
while body_ready && self.body.is_some() && !framed.is_write_buf_full() {
match self.body.as_mut().unwrap().poll_next()? {
Async::Ready(item) => {
// body is done
if item.is_none() {
let _ = self.body.take();
}
framed.force_send(Message::Chunk(item))?;
}
Async::NotReady => body_ready = false,
}
}
}
// flush write buffer
if !framed.is_write_buf_empty() {
match framed.poll_complete()? {
Async::Ready(_) => {
if body_ready {
continue;
} else {
return Ok(Async::NotReady);
}
}
Async::NotReady => return Ok(Async::NotReady),
}
}
// send response
if let Some(res) = self.res.take() {
framed.force_send(res)?;
continue;
}
if self.body.is_some() {
if body_ready {
continue;
} else {
return Ok(Async::NotReady);
}
} else {
break;
}
}
Ok(Async::Ready(self.framed.take().unwrap()))
}
}
