mirror of https://github.com/fafhrd91/actix-web synced 2025-07-04 09:56:22 +02:00

Compare commits


189 Commits

Author SHA1 Message Date
6e8ff5c905 Merge pull request #1495 from JohnTitor/new-web
actix-web: Bump up to 3.0.0-alpha.2
2020-05-08 07:28:18 +09:00
b66c3083a5 Update the actix-web dependency to 3.0.0-alpha.2 2020-05-08 06:46:42 +09:00
b6b3481c6f web: Bump up to 3.0.0-alpha.2 2020-05-08 06:46:13 +09:00
574714d156 Merge pull request #1494 from JohnTitor/new-actors
actors: Bump up to 3.0.0-alpha.1
2020-05-08 06:17:15 +09:00
54abf356d4 actors: Bump up to 3.0.0-alpha.1 2020-05-08 03:33:29 +09:00
9cb3b0ef58 Merge pull request #1493 from JohnTitor/http-next
http: Bump up to 2.0.0-alpha.3
2020-05-08 03:09:52 +09:00
9d0c80b6ce Update actix-http deps 2020-05-08 02:35:45 +09:00
0bc4a5e703 http: Bump up to 2.0.0-alpha.3 2020-05-08 02:35:45 +09:00
9d94fb91b2 correct spelling of ConnectError::Unresolved (#1487)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-05-08 02:26:48 +09:00
9164ed1f0c add resource middleware on actix-web-codegen (#1467)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-05-07 18:31:12 +09:00
b521e9b221 conditional test compilation [range, charset] (#1483)
* conditionally compile range and charset tests

* remove deprecated try macros

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-05-03 22:33:29 +09:00
f37cb6dd0b refactor h1 status line helper to remove unsafe usage (#1484)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-05-03 17:37:40 +09:00
d5ceae2074 Replace deprecated now with now_utc (#1481)
* Replace deprecated now with now_utc

* Update doctest
2020-05-02 10:14:50 +01:00
c27d3fad8e clarify resource/scope app data overriding (#1476)
* relocate FnDataFactory

* clarify app data overriding in Scope and Resource

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-04-30 02:20:47 +09:00
bb17280f51 simplify data factory future polling (#1473)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-04-29 15:38:53 +09:00
b047413b39 Small ws codec fix (#1465)
* Small ws codec fix

* Update actix-http/Changes.md

Co-authored-by: Huston Bokinsky <huston@deepgram.com>
2020-04-29 11:13:09 +09:00
Huy
ce24630d31 Fix typos in MIGRATION.md (#1470)
* Fix typos in MIGRATION.md

Those are `crate` not `create`

* Update MIGRATION.md
2020-04-23 03:39:09 +09:00
751253f23e Merge pull request #1463 from actix/fix/doc-typos
fix spelling errors in doc comments
2020-04-21 13:47:03 +09:00
5b0f7fff69 fix spelling errors in doc comments 2020-04-21 04:09:35 +01:00
54619cb680 actix-http: Remove failure support (#1449) 2020-04-16 06:54:34 +09:00
5b36381cb0 Merge pull request #1452 from actix/fix/default-service-data
set data container on default service calls
2020-04-16 06:01:56 +09:00
45e2e40140 set data container on default service calls
closes #1450
2020-04-14 02:33:19 +01:00
df3f722589 Merge pull request #1451 from actix/cache
Remove cache config from GHA workflows
2020-04-13 06:06:45 +09:00
e7ba871bbf Remove cache config from GHA workflows 2020-04-13 03:42:44 +09:00
ebc2e67015 Merge pull request #1442 from JohnTitor/workspace-doc
Deploy all the workspace crates' docs
2020-04-09 00:48:08 +09:00
74ddc852c8 Tweak README 2020-04-08 04:48:01 +09:00
dfaa330a94 Deploy all the workspace crates' docs 2020-04-08 04:42:38 +09:00
0ad02ee0e0 Add convenience functions for testing (#1401)
* Add convenience functions for testing

* Fix remarks from PR and add tests

* Add unpin to read_json_body

* Update changelog
2020-04-06 04:12:44 +09:00
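A rough sketch (not from the repository) of how the testing helpers from #1401 can be combined, assuming the `send_request()` and `read_body_json()` signatures described in the changelog; the handler and values are hypothetical:

```rust
use actix_web::{test, web, App, HttpResponse};

#[actix_rt::test]
async fn index_returns_json() {
    // Hypothetical handler used only for this sketch.
    let mut app = test::init_service(App::new().route(
        "/",
        web::get().to(|| async { HttpResponse::Ok().json(serde_json::json!({ "status": "ok" })) }),
    ))
    .await;

    // send_request() builds and sends the request in one call ...
    let resp = test::TestRequest::get().uri("/").send_request(&mut app).await;

    // ... and read_body_json() deserializes the response body for assertions.
    let body: serde_json::Value = test::read_body_json(resp).await;
    assert_eq!(body["status"], "ok");
}
```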
aaff68bf05 Change NormalizePath to append trailing slash (#1433)
* Change NormalizePath to append trailing slash

* add tests

* Update CHANGES.md

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-04-05 03:26:40 +09:00
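A minimal sketch of the new default behavior, assuming the middleware is still constructed via `middleware::NormalizePath::default()`: with the trailing slash appended before routing, registering only the slash-terminated path serves both forms of the URL.

```rust
use actix_web::{middleware, web, App, HttpResponse, HttpServer};

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // Requests to /example are rewritten to /example/ before routing.
            .wrap(middleware::NormalizePath::default())
            .route(
                "/example/",
                web::get().to(|| async { HttpResponse::Ok().body("normalized") }),
            )
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
```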
fcb1dec235 Merge pull request #1431 from OSSystems/topic/explicit-features-requirements
Add explicit feature requirements for examples and tests
2020-03-28 10:58:00 +09:00
7b7daa75a4 Add explicit feature requirements for examples and tests
This allows us to build 'actix-web' without default features and run all
tests.

Signed-off-by: Otavio Salvador <otavio@ossystems.com.br>
2020-03-25 23:49:24 -03:00
2067331884 Refactor actix-codegen duplicate code (#1423)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-03-20 04:40:42 +09:00
bf630d9475 Merge pull request #1422 from OSSystems/topic/impl-error-more-types
Implement `std::error::Error` for our custom errors
2020-03-19 05:05:57 +09:00
146ae4da18 Implement std::error::Error for our custom errors
To allow more ergonomic use and better integration with the
ecosystem, this adds the `std::error::Error` `impl` for our custom
errors.

We intend to drop this hand-written code once `derive_more` finishes the
addition of the Error derive support[1]. Until that is available, we
need to live with it.

1. https://github.com/JelteF/derive_more/issues/92

Signed-off-by: Otavio Salvador <otavio@ossystems.com.br>
2020-03-18 00:22:18 -03:00
52c5755d56 Merge pull request #1421 from actix/JohnTitor-patch-1
Upload coverage on PRs
2020-03-18 06:16:41 +09:00
5548c57a09 Upload coverage on PRs 2020-03-18 05:04:30 +09:00
0d958fabd7 📝 Improve the code example for JsonConfig (#1418)
* 📝 Improve the code example for JsonConfig

* Remove a redundant comment
2020-03-17 08:23:54 +09:00
c67e4c1fe8 Refactored macros (#1333)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-03-15 07:23:28 +09:00
4875dfbec7 Merge pull request #1416 from JohnTitor/fix-doc
Fix `read_body` doc
2020-03-13 07:06:57 +09:00
d602a7e386 Fix read_body doc 2020-03-13 05:52:58 +09:00
9f196fe5a5 Merge pull request #1413 from OSSystems/topic/fix-warnings
Fix clippy warnings
2020-03-12 16:24:44 +09:00
e4adcd1935 Merge pull request #1411 from JohnTitor/patch
Clean-up metadata
2020-03-12 16:21:34 +09:00
7e0d898d5a Fix clippy warnings
Signed-off-by: Otavio Salvador <otavio@ossystems.com.br>
2020-03-12 00:52:21 -03:00
51518721e5 Add notes to READMEs 2020-03-12 07:57:38 +09:00
c02d3e205b Clean-up metadata 2020-03-12 07:57:20 +09:00
a253d7d3ce Merge pull request #1410 from JohnTitor/new-web
Release actix-web 3.0.0-alpha.1
2020-03-12 05:12:54 +09:00
0152cedc5d Update changelog 2020-03-12 03:24:15 +09:00
a6a47b95ee Exclude actix-cors 2020-03-12 03:04:31 +09:00
1b28a5d48b Update actix-web dependency to 3.0.0-alpha.1 2020-03-12 03:03:50 +09:00
c147b94832 Bump up to 3.0.0-alpha.1 2020-03-12 03:03:22 +09:00
95f9a12a5e dev-deps: Update env_logger to 0.7 2020-03-12 02:58:22 +09:00
73eeab0e90 Merge pull request #1391 from JohnTitor/new-awc
Release awc v2.0.0-alpha.1
2020-03-11 21:15:48 +09:00
ce1e996b41 Update release date 2020-03-11 20:42:45 +09:00
e718f65121 Update tests 2020-03-08 16:42:45 +09:00
a9a475d555 Make test_server async fn 2020-03-08 16:42:26 +09:00
b93e1555ec Update actix-connect to 2.0.0-alpha.2 2020-03-08 15:27:40 +09:00
6f33b7ea42 Update awc dependency 2020-03-08 15:27:40 +09:00
294523a32f Bump up to 2.0.0-alpha.1 2020-03-08 15:27:39 +09:00
6b626c7d77 dev-deps: Update env_logger to 0.7 2020-03-08 03:07:53 +09:00
5da9e277a2 Merge pull request #1399 from JohnTitor/new-http
Release actix-http 2.0.0-alpha.2
2020-03-08 01:47:40 +09:00
0d5646a8b6 Run rustfmt 2020-03-08 00:52:39 +09:00
7941594f94 Update actix-http dependency 2020-03-08 00:50:20 +09:00
6f63acaf01 Bump up to 2.0.0-alpha.2 2020-03-08 00:48:45 +09:00
7172885beb Update changelog 2020-03-08 00:43:17 +09:00
cf721c5fff Update README example 2020-03-08 00:43:01 +09:00
10e3e72595 Http2 client configuration to improve performance (#1394)
* add defaults for http2 client configuration

* fix spaces

* Add changes text for extended H2 defaults buffers

* client: configurable H2 window sizes and max_http_version

* add H2 window size configuration and max_http_version to awc::ClientBuilder

* add awc::ClientBuilder H2 window sizes and max_http_version

* add test for H2 window size settings

* cleanup comment

* Apply code review fixes

* Code review fix for awc ClientBuilder

* Remove unnecessary comments on code review

* pin quote version to resolve build issue

* max_http_version to accept http::Version

* revert fix for quote broken build
2020-03-07 11:09:31 +09:00
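A hedged sketch of the client-side knobs added in #1394, assuming the `awc::ClientBuilder` methods named in the commit messages (`initial_window_size`, `initial_connection_window_size`, `max_http_version`); the values below are illustrative, not the new defaults.

```rust
use awc::http::Version;

fn build_client() -> awc::Client {
    awc::ClientBuilder::new()
        // HTTP/2 flow-control windows: per-stream and per-connection.
        .initial_window_size(2 * 1024 * 1024)
        .initial_connection_window_size(1024 * 1024)
        // Cap protocol negotiation, e.g. to force HTTP/1.1.
        .max_http_version(Version::HTTP_11)
        .finish()
}
```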
a7d805aab7 Merge pull request #1396 from Aaron1011/fix/reapply-dispatcher
Re-apply commit 2cf7b3ad20
2020-03-05 02:48:20 +09:00
e90950fee1 Re-apply commit 2cf7b3ad20
This ended up getting reverted by #1367, which re-introduced an unsound
use of `Pin::new_unchecked`

See my original PR #1374 for the reasoning behind this change.
2020-03-04 11:27:58 -05:00
c8f0672ef7 Merge pull request #1395 from JohnTitor/rustls
Update `rustls` to 0.17
2020-03-04 15:56:27 +09:00
9d661dc4f3 Update changelog 2020-03-04 15:20:14 +09:00
687dc609dd Update rustls to 0.17 2020-03-04 15:11:31 +09:00
b9b52079e0 Update actix-tls to 2.0.0-alpha.1 2020-03-04 15:10:23 +09:00
117d28f7ba Update actix-connect to 2.0.0-alpha.1 2020-03-04 15:09:31 +09:00
795a575fc5 Merge pull request #1386 from JohnTitor/deny-to-warn
Demote lint level to warn
2020-02-28 14:17:11 +09:00
b4d63667df Demote lint level to warn 2020-02-27 22:39:11 +09:00
3dc859af58 Fix missing std::error::Error implement for MultipartError. (#1382)
* Fix missing `std::error::Error` implement for `MultipartError`.

* Update actix-multipart CHANGES.md.
2020-02-27 22:34:06 +09:00
1fa02b5f1c Merge pull request #1385 from JohnTitor/http-2-alpha
Release actix-http 2.0.0-alpha.1
2020-02-27 14:47:32 +09:00
c9fdcc596d Update actix to 0.10.0-alpha.1 2020-02-27 12:46:29 +09:00
6cc83dbb67 Allow clippy lint for compatibility 2020-02-27 12:45:11 +09:00
3b675c9125 Update actix-http to 2.0.0-alpha.1 2020-02-27 12:39:04 +09:00
15a2587887 Bump up to 2.0.0-alpha.1 2020-02-27 12:39:04 +09:00
0173f99726 Update changelog 2020-02-27 12:39:04 +09:00
f27dd19093 Fix Clippy warnings 2020-02-27 12:39:04 +09:00
7ba14fd113 Run rustfmt 2020-02-27 11:10:55 +09:00
903ae47baa dev-deps: Update env_logger to 0.7 2020-02-27 11:08:45 +09:00
95c18dbdf3 Merge pull request #1367 from actix/msg-body
Merge `MessageBody` improvements
2020-02-27 10:42:14 +09:00
d3ccf46e92 Clean-up metadata 2020-02-27 09:53:27 +09:00
cd1765035c Avoid re-definition 2020-02-27 09:42:32 +09:00
ea28219d0f reenable actix-http test-ws 2020-02-27 09:42:32 +09:00
77058ef779 adopt MessageBody Pin changes to actix-web root 2020-02-27 09:42:32 +09:00
e5f2feec45 reenable actix-http from local path 2020-02-27 09:42:32 +09:00
0a86907dd2 use mem::replace instead of mem::take rust 1.40+ 2020-02-27 09:37:05 +09:00
78749a4b7e rollback actix-http version change 2020-02-27 09:37:05 +09:00
de815dd99c Fixed condition for finishing transfer of response 2020-02-27 09:37:05 +09:00
e6078bf792 Fix EncoderBody enum to align with Body::Message 2020-02-27 09:37:05 +09:00
a84b37199a Add Unpin to Body to get rid of unsafe in MessageBody 2020-02-27 09:37:05 +09:00
c05f9475c5 refactor dispatcher to avoid possible UB with DispatcherState Pin 2020-02-27 09:37:05 +09:00
69dab0063c Get rid of one more unsafe 2020-02-27 09:37:05 +09:00
ec5c779732 unlink MessageBody from Unpin 2020-02-27 09:37:05 +09:00
2e2ea7ab80 remove extra whitespaces and Unpins 2020-02-27 09:37:05 +09:00
eeebc653fd change actix-http version to alpha 2020-02-27 09:37:05 +09:00
835a00599c rollback missed dependencies and CHANGES in crates except actix-http 2020-02-27 09:37:05 +09:00
d9c415e540 disable weird poll test until actix-web based on actix-http:2 2020-02-27 09:37:05 +09:00
09a391a3ca rollback changes to actix-web, awc and test-server for now 2020-02-27 09:37:05 +09:00
62aba424e2 Rollback actix-http-test dependency to show the issue 2020-02-27 09:37:05 +09:00
9d04b250f9 This is a squashed commit:
- Convert MessageBody to accept Pin in poll_next

- add CHANGES and increase versions aligned to semver

- update crates to accommodate MessageBody Pin change

- fix tests and dependencies
2020-02-27 09:37:05 +09:00
a4148de226 add test crashing with segfault according to #1321 2020-02-27 09:36:30 +09:00
48ef4d7a26 Add actix-http support for actix error messages (#1379)
* Moved actix-http for actix from actix crate

* remove resolver feature

* renamed actix feature to actor

* fixed doc attr for actors, add documentation
2020-02-27 09:34:49 +09:00
71c4bd1b30 Remove uses of Pin::new_unchecked in h1 Dispatcher (#1374)
This removes the last uses of unsafe `Pin` functions in actix-web.

This PR adds a `Pin<Box<_>>` wrapper to `DispatcherState::Upgrade`,
`State::ExpectCall`, and `State::ServiceCall`.

The previous uses of the futures `State::ExpectCall` and `State::ServiceCall`
were Undefined Behavior - a future was obtained from `self.expect.call`
or `self.service.call`, pinned on the stack, and then immediately
returned from `handle_request`. The only alternative to using `Box::pin`
would be to refactor `handle_request` to write the futures directly into
their final location, or avoid polling them before they are returned.

The previous use of `DispatcherState::Upgrade` doesn't seem to be
unsound. However, having data pinned inside an enum that we
`std::mem::replace` would require some careful `unsafe` code to ensure
that we never call `std::mem::replace` when the active variant contains
pinned data. By using `Box::pin`, we remove any possibility of future
refactoring accidentally introducing undefined behavior.

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-02-26 08:21:05 +09:00
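An illustrative sketch (stand-in types, not the actual actix-http dispatcher) of the pattern this commit describes: keeping the future inside the enum variant as a `Pin<Box<_>>` so the enum can be `std::mem::replace`d without ever moving pinned data.

```rust
use std::future::Future;
use std::pin::Pin;

// Stand-in for the dispatcher's service-call future.
type ServiceFuture = Pin<Box<dyn Future<Output = &'static str>>>;

enum State {
    None,
    // The future is pinned on the heap, so swapping the enum value with
    // std::mem::replace never moves data that a Pin points into.
    ServiceCall(ServiceFuture),
}

fn handle_request() -> State {
    // Pinned once via Box::pin and returned freely; no Pin::new_unchecked
    // on a stack local is needed.
    State::ServiceCall(Box::pin(async { "response" }))
}

fn main() {
    let mut state = handle_request();
    // Safe: the boxed future's heap location is unchanged by the swap.
    let _previous = std::mem::replace(&mut state, State::None);
}
```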
de1d6ad5cb Merge pull request #1344 from actix/replace-unsafe-content-length-helper
Replace unsafe content length helper
2020-02-25 17:02:22 +09:00
2a72e8d119 Merge branch 'master' into replace-unsafe-content-length-helper 2020-02-25 14:30:04 +09:00
2a8e5fdc73 Merge pull request #1370 from mattgathu/feat/helper-function-for-trace-method
Create helper function for HTTP Trace Method
2020-02-25 14:24:09 +09:00
b213c07799 Merge branch 'master' into feat/helper-function-for-trace-method 2020-02-25 12:36:20 +09:00
3d6b8686ad Merge pull request #1373 from JohnTitor/new-codegen
Release `actix-web-codegen` v0.2.1
2020-02-25 09:32:48 +09:00
a4f87a53da Update CHANGES.md 2020-02-25 08:42:39 +09:00
08f172a0aa Merge branch 'master' into new-codegen 2020-02-25 08:29:31 +09:00
7792eaa16e Merge pull request #1378 from JohnTitor/fix-doc
Fix doc comment
2020-02-25 08:29:14 +09:00
845ce3cf34 Fix doc comment 2020-02-25 07:46:03 +09:00
7daef22e24 Merge branch 'master' into new-codegen 2020-02-25 06:58:49 +09:00
1249262c35 Merge pull request #1372 from JohnTitor/time-0.2.7
Update `time` to 0.2.7
2020-02-25 06:58:33 +09:00
94da08f506 increase content-length fast path to responses up to 1MB 2020-02-24 20:58:41 +00:00
d143c44130 Update the ChangeLog 2020-02-23 09:33:28 +01:00
8ec8ccf4fb Create helper function for HTTP Trace Method
Create *route* with `TRACE` method guard.
2020-02-23 09:25:55 +01:00
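A small sketch of the new helper in use, assuming it is exposed as `web::trace()` as described in the 3.0.0-alpha.1 changelog:

```rust
use actix_web::{web, App, HttpResponse, HttpServer};

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new().service(
            // web::trace() creates a Route answering only TRACE requests.
            web::resource("/ping")
                .route(web::trace().to(|| async { HttpResponse::Ok().finish() })),
        )
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}
```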
c8ccc69b93 actix-http: update time to 0.2.7 2020-02-23 07:09:00 +09:00
f9f9fb4c84 actix-http-test: update time to 0.2.7 2020-02-23 07:08:50 +09:00
1b77963aac actix-web: update time to 0.2.7 2020-02-23 07:08:22 +09:00
036ffd43f9 Prepare for new release 2020-02-23 06:40:02 +09:00
bdccccd536 Merge pull request #1368 from mattgathu/add-missing-docs-attr-to-codegen-structs
Add `#[allow(missing_docs)]` attribute to generated structs
2020-02-23 06:26:42 +09:00
060c392c67 Add missing_docs attribute to generated structs 2020-02-22 10:32:12 +01:00
245f96868a impl downcast_ref for MessageBody (#1287)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-02-21 13:31:51 +09:00
b3f1071aaf Merge pull request #1361 from Aaron1011/fix/connector-pool-support
Use #[pin_project] with `ConnectorPoolSupport`
2020-02-21 06:01:26 +09:00
e6811e8818 Use #[pin_project] with ConnectorPoolSupport
This removes a use of `Pin::get_unchecked_mut`
2020-02-19 21:42:53 -05:00
809930d36e Add dependencies to docs example (#1343)
* Add dependencies to docs example

* Change codeblock type to toml

* Clarify the need for actix-rt

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-02-20 05:13:10 +09:00
f266b44cb0 replace unsafe blocks in write_usize helper 2020-02-16 15:20:25 +00:00
31a3515e90 add safe vs unsafe benchmarks 2020-02-16 14:31:06 +00:00
82b2786d6b replace unsafe content length implementation 2020-02-16 14:31:05 +00:00
6ab7cfa2be Remove descriptions about undefined uds feature from docs (#1356) 2020-02-16 04:18:31 +09:00
9b3f7248a8 Merge pull request #1354 from actix/JohnTitor-patch-1
Disable coverage for PRs
2020-02-14 08:18:09 +09:00
a1835d6510 Disable coverage for PRs 2020-02-14 07:31:29 +09:00
4484b3f66e Merge pull request #1347 from actix/bye-travis
Use Actions fully
2020-02-12 05:54:32 +09:00
cde3ae5f61 Remove Travis config 2020-02-08 05:36:11 +09:00
7d40b66300 Add some Actions workflows 2020-02-08 04:28:34 +09:00
63730c1f73 Merge pull request #1345 from JohnTitor/fix-warnings
Fix warnings
2020-02-08 04:17:04 +09:00
53ff3ad099 Ignore more tests that cause timeouts 2020-02-08 02:20:01 +09:00
6406f56ca2 Fix/suppress warnings 2020-02-08 02:20:01 +09:00
728b944360 Extensions module improvement and tests. (#1297)
* replace get.is_some to contains_key

* Add tests

* remove unnecessary box cast

* fix missing uints

* asserts fix

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-02-07 16:08:25 +09:00
3851a377df Fix minor grammatical errors (#1341) 2020-02-07 03:00:22 +09:00
fe13789345 Use Pin<Box<S>> in BodyStream and SizedStream (#1328)
Fixes #1321

A better fix would be to change `MessageBody` to take a `Pin<&mut
Self>`, rather than a `&mut self`. This will avoid requiring the
use of `Box` for all consumers by allowing the caller to determine how
to pin the `MessageBody` implementation (e.g. via stack pinning).

However, doing so is a breaking change that will affect every user of
`MessageBody`. By pinning the inner stream ourselves, we can fix the
undefined behavior without breaking the API.

I've included @sebzim4500's reproduction case as a new test case.
However, due to the nature of undefined behavior, this could pass (and
not segfault) even if underlying issue were to regress.

Unfortunately, until rust-lang/unsafe-code-guidelines#148 is resolved,
it's not even possible to write a Miri test that will pass when the bug
is fixed.

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-31 09:39:34 +09:00
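An illustrative stand-in (not the actual actix-http types) for the fix: the wrapper owns its stream as `Pin<Box<S>>`, so it can hand out `Pin<&mut S>` safely instead of reaching for `Pin::new_unchecked` on an unpinned field.

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

use bytes::Bytes;
use futures_core::Stream;

struct BodyStream<S> {
    // Heap-pinning the stream keeps the public API unchanged while
    // removing the unsound stack pinning described above.
    stream: Pin<Box<S>>,
}

impl<S> BodyStream<S>
where
    S: Stream<Item = Result<Bytes, ()>>,
{
    fn new(stream: S) -> Self {
        BodyStream { stream: Box::pin(stream) }
    }

    fn poll_next_chunk(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, ()>>> {
        // as_mut() yields Pin<&mut S> without any unsafe code.
        self.stream.as_mut().poll_next(cx)
    }
}
```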
3033f187d2 Enforce safety of downcast_ref at compile time. (#1326)
* Enforce safety of `downcast_ref` at compile time.

The safety of `downcast_ref` requires that `__private_get_type_id__` not
be overridden by callers, since the returned `TypeId` is used to check if
the cast is safe. However, all trait methods in Rust are public, so
users can override `__private_get_type_id__` despite it being
`#[doc(hidden)]`.

This commit makes `__private_get_type_id__` return a type with a private
constructor, ensuring that the only possible implementation is the
default implementation. A more detailed explanation is provided in the
comments added to the file.

Note that the standard library was affected by this type of issue with
the `Error::type_id` function: see https://blog.rust-lang.org/2019/05/14/Rust-1.34.2.html#whats-in-1.34.2-stable

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-30 23:43:35 +09:00
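A condensed sketch of the sealing trick described above (simplified, not the exact actix-http code): the hidden method returns a token type whose constructor is private, so no downstream impl can override it and report a wrong `TypeId`.

```rust
use std::any::TypeId;

// Only this module can construct PrivateHelper, so only the default
// implementation below can ever be the source of the returned TypeId.
pub struct PrivateHelper(());

pub trait MessageBody {
    #[doc(hidden)]
    fn __private_get_type_id__(&self) -> (TypeId, PrivateHelper)
    where
        Self: 'static,
    {
        (TypeId::of::<Self>(), PrivateHelper(()))
    }
}

impl dyn MessageBody + 'static {
    pub fn downcast_ref<T: MessageBody + 'static>(&self) -> Option<&T> {
        if self.__private_get_type_id__().0 == TypeId::of::<T>() {
            // SAFETY: the sealed TypeId above is guaranteed to describe Self.
            Some(unsafe { &*(self as *const dyn MessageBody as *const T) })
        } else {
            None
        }
    }
}
```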
276a5a3ee4 Replace UnsafeCell with Cell in DateServiceInner (#1325)
* Replace `UnsafeCell` with `Cell` in `DateServiceInner`

This ensures that it's impossible to cause undefined behavior by
accidentally violating Rust's aliasing rules (e.g. passing a closure to
`set_date` which ends up invoking `reset` or `update` on the inner
`DateServiceInner`).

There might be a tiny amount of overhead from copying the `Option<(Date,
Instant)>` rather than taking a reference, but it shouldn't be
measurable.

Since the wrapped type is `Copy`, a `Cell` can be used, avoiding the
runtime overhead of a `RefCell`.

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-29 21:05:08 +09:00
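A reduced sketch of the idea (stand-in types, not the actual service): because the cached value is `Copy`, a `Cell` is sufficient, and a closure handed to callers can invoke `reset`/`update` freely since no reference into the cell ever escapes.

```rust
use std::cell::Cell;
use std::time::Instant;

#[derive(Clone, Copy)]
struct Date(u64); // stand-in for the formatted HTTP date

struct DateServiceInner {
    current: Cell<Option<(Date, Instant)>>,
}

impl DateServiceInner {
    fn reset(&self) {
        self.current.set(None);
    }

    fn update(&self, date: Date) {
        self.current.set(Some((date, Instant::now())));
    }

    fn get(&self) -> Option<(Date, Instant)> {
        // Copies the value out instead of borrowing it.
        self.current.get()
    }
}

fn main() {
    let inner = DateServiceInner { current: Cell::new(None) };
    inner.update(Date(0));
    assert!(inner.get().is_some());
    inner.reset();
    assert!(inner.get().is_none());
}
```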
664f9a8b2d Long lasting auto-prolonged session (#1292)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-29 10:26:39 +09:00
c73c2dc12c Don't use cache in Windows CI (#1327) 2020-01-29 09:00:04 +09:00
e634e64847 Upgrade time to 0.2.5 (#1254)
* Use `OffsetDateTime` instead of `PrimitiveDateTime`

* Parse time strings with `PrimitiveDateTime::parse` instead of `OffsetDateTime::parse`

* Remove unused `time` dependency from actix-multipart

* Fix a few errors with time related tests from the `time` upgrade

* Implement logic to convert a RFC 850 two-digit year into a full length year, and organize time parsing related functions

* Upgrade `time` to 0.2.2

* Correctly parse C's asctime time format using time 0.2's new format patterns

* Update CHANGES.md

* Use `time` without any of its deprecated functions

* Enforce a UTC time offset when converting an `OffsetDateTime` into a Header value

* Use the more readable version of `Duration::seconds(0)`, `Duration::zero()`

* Remove unneeded conversion of time::Duration to std::time::Duration

* Use `OffsetDateTime::as_seconds_f64` instead of manually calculating the amount of seconds from nanoseconds

* Replace a few additional instances of `Duration::seconds(0)` with `Duration::zero()`

* Truncate any nanoseconds from a supplied `Duration` within `Cookie::set_max_age` to ensure two Cookies with the same amount whole seconds equate to one another

* Fix the actix-http:🍪:do_not_panic_on_large_max_ages test

* Convert `Cookie::max_age` and `Cookie::expires` examples to `time` 0.2

Mainly minor changes. Type inference can be used alongside the new
`time::parse` method, such that the type doesn't need to be specified.
This will be useful if a refactoring takes place that changes the type.
There are also new macros, which are used where possible.

One change that is not immediately obvious: in `HttpDate`, there was an
unnecessary conditional. As the time crate allows for negative durations
(and can perform arithmetic with such), the if/else can be removed
entirely.

Time v0.2.3 also has some bug fixes, which is why I am not using a more
general v0.2 in Cargo.toml.

v0.2.3 has been yanked, as it was backwards incompatible. This version
reverts the breaking change, while still supporting rustc back to
1.34.0.

* Add missing `time::offset` macro import

* Fix type confusion when using `time::parse` followed by `using_offset`

* Update `time` to 0.2.5

* Update CHANGES.md

Co-authored-by: Jacob Pratt <the.z.cuber@gmail.com>
2020-01-28 20:44:22 +09:00
cdba30d45f Skip empty chunks for BodyStream and SizedStream (#1308)
* Skip empty chunks for BodyStream and SizedStream when streaming response (#1267)

* Fix tests to fail on previous implementation

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-28 18:28:09 +09:00
74dcc7366d Remove several uses of Pin::new_unchecked (#1294)
Most of the relevant struct already had a `#[pin_project]` attribute,
but it wasn't being used.

The remaining uses of `Pin::new_unchecked` all involve going from a
`&mut T` to a `Pin<&mut T>`, without directly observing a `Pin<&mut T>`
first. As such, they cannot be replaced by `pin_project`

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-28 12:35:51 +09:00
d137a8635b Replace Pin::new_unchecked with #[pin_project] in tuple_from_req! (#1293)
Using some module trickery, we can generate a tuple struct for each
invocation of the macro. This allows us to use `pin_project` to project
through to the tuple fields, removing the need to use
`Pin::new_unchecked`

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-28 10:45:25 +09:00
a2d4ff157e Update call_service documentation (#1302)
Co-authored-by: Christian Battaglia <christian.d.battaglia@gmail.com>
2020-01-28 08:09:46 +09:00
71d11644a7 Add ability to name a handler function as 'config' (#1290)
* eliminate handler naming restrictions #1277

* Update actix-web-codegen/CHANGES.md

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-26 07:22:40 +09:00
8888520d83 Add benchmark for full stack request lifecycle (#1298)
* add benchmark for full stack request lifecycle

* add direct service benchmarks

* fix newline

* add cloneable service benchmarks

* remove cloneable bench experiments + cargo fmt

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-25 08:05:25 +09:00
cf3577550c Tweak caches (#1319)
* Try to use `cargo-cache`

* Tweak issue template
2020-01-25 02:27:13 +09:00
58844874a0 Fixing #1295 convert UnsafeCell to RefCell in CloneableService (#1303)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-24 14:51:38 +09:00
78f24dda03 Initial Issue template (#1311)
* Initial Issue template

* First round of changes for the bug report

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-24 07:32:34 +09:00
e17b3accb9 Remove codecoverage for tests and examples (#1299)
* Ignore Tests & Examples for CodeCoverage

Ignore Tests & Examples for CodeCoverage
2020-01-24 05:10:02 +09:00
c6fa007e72 Fix vcpkg cache (#1312) 2020-01-23 11:27:34 +09:00
a3287948d1 allow explicit SameSite=None cookies (#1282)
fixes #1035
2020-01-23 10:08:23 +09:00
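A short sketch of the behavior this enables, using the cookie builder re-exported by actix-web (the cookie name and value are illustrative):

```rust
use actix_web::cookie::{Cookie, SameSite};
use actix_web::HttpResponse;

fn response_with_cross_site_cookie() -> HttpResponse {
    // Explicit SameSite::None is now emitted as "SameSite=None" in the
    // Set-Cookie header; omit .same_site(..) to send no attribute at all.
    let cookie = Cookie::build("session", "abc123")
        .secure(true) // browsers generally require Secure with SameSite=None
        .same_site(SameSite::None)
        .finish();

    HttpResponse::Ok().cookie(cookie).finish()
}
```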
2e9ab0625e Tweak actions (#1305)
* Add benchmark action

* Fix Windows build
2020-01-23 06:23:53 +09:00
3a5b62b550 Add dependencies instruction (#1281) 2020-01-16 23:17:17 +09:00
412e54ce10 Fixed documentation typo for actix-files (#1278) 2020-01-15 11:09:58 -08:00
bca41f8d40 Changes to Cors builder (#1266)
Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-11 04:53:17 +09:00
7c974ee668 Update doc comment for HttpRequest::app_data (#1265)
* update doc comment for `HttpRequest::app_data`

* add `no_run` to doc comment

* add `ignore` to doc comment

* Update src/request.rs

Co-Authored-By: Yuki Okushi <huyuumi.dev@gmail.com>

Co-authored-by: Yuki Okushi <huyuumi.dev@gmail.com>
2020-01-11 03:55:20 +09:00
abb462ef85 Replace sha1 dependency with sha-1 (#1258)
* Replace sha1 dependency with sha-1

This other crate is being maintained, and it offers better performances
when using the `asm` feature (especially [on
AArch64](https://github.com/RustCrypto/hashes/pull/97)).

* Update CHANGES.md with the sha-1 migration

* Add a test for hash_key()
2020-01-11 02:34:31 +09:00
e66312b664 add extra constraints 2020-01-10 11:36:59 +06:00
39f4b2b39e Merge branch 'master' of github.com:actix/actix-web 2020-01-10 11:28:58 +06:00
f6ff056b8a Fix panic with already borrowed: BorrowMutError #1263 2020-01-10 11:26:54 +06:00
51ab4fb73d Tweak actions to use cache and not to be stuck on the way (#1264) 2020-01-10 03:30:45 +09:00
f5fd6bc49f Fix actix-http examples (#1259)
Fix actix-http examples
2020-01-07 00:15:04 +09:00
2803fcbe22 Small grammaritical update to lib.rs (#1248) 2020-01-03 08:45:17 +06:00
67793c5d92 add ssl feature migration 2019-12-30 21:22:04 +06:00
bcb5086c91 Added 2.0.0 rustls feature name change (#1244) 2019-12-30 21:16:04 +06:00
7bd2270290 Fix link to example in readme.md (#1236)
* Fix link to example in readme.md

* Add links to openssl and rustls examples

* Rustls should be uppercase
2019-12-26 19:42:07 +09:00
a4ad5e6b69 update timeouts for test server 2019-12-25 20:52:20 +04:00
6db909a3e7 update migration 2019-12-25 20:27:30 +04:00
642ae161c0 prep actix-web release 2019-12-25 20:21:00 +04:00
7b3c99b933 prep actix-framed release 2019-12-25 20:17:22 +04:00
f86ce0390e allow to specify multi pattern for resources 2019-12-25 20:14:44 +04:00
7882f545e5 Allow to gracefully stop test server via TestServer::stop() 2019-12-25 12:10:48 +04:00
1c75e6876b update migration 2019-12-22 17:16:07 +04:00
6a0cd2dced Rename HttpServer::start() to HttpServer::run() 2019-12-22 17:12:22 +04:00
c7f3915779 update actix-service dep 2019-12-22 16:39:25 +04:00
145 changed files with 4263 additions and 1875 deletions

37
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file
View File

@ -0,0 +1,37 @@
---
name: bug report
about: create a bug report
---
Your issue may already be reported!
Please search on the [Actix Web issue tracker](https://github.com/actix/actix-web/issues) before creating one.
## Expected Behavior
<!--- If you're describing a bug, tell us what should happen -->
<!--- If you're suggesting a change/improvement, tell us how it should work -->
## Current Behavior
<!--- If describing a bug, tell us what happens instead of the expected behavior -->
<!--- If suggesting a change/improvement, explain the difference from current behavior -->
## Possible Solution
<!--- Not obligatory, but suggest a fix/reason for the bug, -->
<!--- or ideas how to implement the addition or change -->
## Steps to Reproduce (for bugs)
<!--- Provide a link to a live example, or an unambiguous set of steps to -->
<!--- reproduce this bug. Include code to reproduce, if relevant -->
1.
2.
3.
4.
## Context
<!--- How has this issue affected you? What are you trying to accomplish? -->
<!--- Providing context helps us come up with a solution that is most useful in the real world -->
## Your Environment
<!--- Include as many relevant details about the environment you experienced the bug in -->
* Rust Version (i.e., output of `rustc -V`):
* Actix Web Version:

22
.github/workflows/bench.yml vendored Normal file
View File

@ -0,0 +1,22 @@
name: Benchmark (Linux)
on: [push, pull_request]
jobs:
check_benchmark:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: nightly
profile: minimal
override: true
- name: Check benchmark
uses: actions-rs/cargo@v1
with:
command: bench

64
.github/workflows/linux.yml vendored Normal file
View File

@ -0,0 +1,64 @@
name: CI (Linux)
on: [push, pull_request]
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- 1.39.0 # MSRV
- stable
- nightly
name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@master
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
- name: tests (actix-http)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=actix-http --no-default-features --features=rustls -- --nocapture
- name: tests (awc)
uses: actions-rs/cargo@v1
timeout-minutes: 40
with:
command: test
args: --package=awc --no-default-features --features=rustls -- --nocapture
- name: Generate coverage file
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
run: |
cargo install cargo-tarpaulin
cargo tarpaulin --out Xml
- name: Upload to Codecov
if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
uses: codecov/codecov-action@v1
with:
file: cobertura.xml

39
.github/workflows/macos.yml vendored Normal file
View File

@ -0,0 +1,39 @@
name: CI (macOS)
on: [push, pull_request]
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-apple-darwin
runs-on: macOS-latest
steps:
- uses: actions/checkout@master
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-apple-darwin
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
--skip=test_h2_content_length
--skip=test_reading_deflate_encoding_large_random_rustls

View File

@ -1,67 +0,0 @@
name: CI
on: [push, pull_request]
env:
VCPKGRS_DYNAMIC: 1
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
toolchain:
- x86_64-pc-windows-msvc
# - i686-pc-windows-msvc
- x86_64-apple-darwin
version:
- stable
- nightly
include:
- toolchain: x86_64-pc-windows-msvc
os: windows-latest
arch: x64
# - toolchain: i686-pc-windows-msvc
# os: windows-latest
# arch: x86
- toolchain: x86_64-apple-darwin
os: macOS-latest
name: ${{ matrix.version }} - ${{ matrix.toolchain }}
runs-on: ${{ matrix.os }}
steps:
- uses: actions/checkout@master
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-${{ matrix.toolchain }}
default: true
- name: Install OpenSSL
if: matrix.os == 'windows-latest'
run: |
vcpkg integrate install
vcpkg install openssl:${{ matrix.arch }}-windows
- name: check nightly
if: matrix.version == 'nightly'
uses: actions-rs/cargo@v1
with:
command: check
args: --all --benches --bins --examples --tests
- name: check stable
if: matrix.version == 'stable'
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
if: matrix.toolchain != 'x86_64-pc-windows-gnu'
uses: actions-rs/cargo@v1
with:
command: test
args: --all --all-features -- --nocapture

35
.github/workflows/upload-doc.yml vendored Normal file
View File

@ -0,0 +1,35 @@
name: Upload documentation
on:
push:
branches:
- master
jobs:
build:
runs-on: ubuntu-latest
if: github.repository == 'actix/actix-web'
steps:
- uses: actions/checkout@master
- name: Install Rust
uses: actions-rs/toolchain@v1
with:
toolchain: stable-x86_64-unknown-linux-gnu
profile: minimal
override: true
- name: check build
uses: actions-rs/cargo@v1
with:
command: doc
args: --no-deps --workspace --all-features
- name: Tweak HTML
run: echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html
- name: Upload documentation
run: |
git clone https://github.com/davisp/ghp-import.git
./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://${{ secrets.GITHUB_TOKEN }}@github.com/"${{ github.repository }}.git" target/doc

59
.github/workflows/windows.yml vendored Normal file
View File

@ -0,0 +1,59 @@
name: CI (Windows)
on: [push, pull_request]
env:
VCPKGRS_DYNAMIC: 1
jobs:
build_and_test:
strategy:
fail-fast: false
matrix:
version:
- stable
- nightly
name: ${{ matrix.version }} - x86_64-pc-windows-msvc
runs-on: windows-latest
steps:
- uses: actions/checkout@master
- name: Install ${{ matrix.version }}
uses: actions-rs/toolchain@v1
with:
toolchain: ${{ matrix.version }}-x86_64-pc-windows-msvc
profile: minimal
override: true
- name: Install OpenSSL
run: |
vcpkg integrate install
vcpkg install openssl:x64-windows
Copy-Item C:\vcpkg\installed\x64-windows\bin\libcrypto-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libcrypto.dll
Copy-Item C:\vcpkg\installed\x64-windows\bin\libssl-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libssl.dll
Get-ChildItem C:\vcpkg\installed\x64-windows\bin
Get-ChildItem C:\vcpkg\installed\x64-windows\lib
- name: check build
uses: actions-rs/cargo@v1
with:
command: check
args: --all --bins --examples --tests
- name: tests
uses: actions-rs/cargo@v1
with:
command: test
args: --all --all-features --no-fail-fast -- --nocapture
--skip=test_h2_content_length
--skip=test_reading_deflate_encoding_large_random_rustls
--skip=test_params
--skip=test_simple
--skip=test_expect_continue
--skip=test_http10_keepalive
--skip=test_slow_request
--skip=test_connection_force_close
--skip=test_connection_server_close
--skip=test_connection_wait_queue_force_close

View File

@ -1,61 +0,0 @@
language: rust
sudo: required
dist: trusty
cache:
# cargo: true
apt: true
matrix:
include:
- rust: stable
- rust: beta
- rust: nightly-2019-11-20
allow_failures:
- rust: nightly-2019-11-20
env:
global:
# - RUSTFLAGS="-C link-dead-code"
- OPENSSL_VERSION=openssl-1.0.2
before_install:
- sudo add-apt-repository -y ppa:0k53d-karl-f830m/openssl
- sudo apt-get update -qq
- sudo apt-get install -y openssl libssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev
before_cache: |
if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-11-20" ]]; then
RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install --version 0.6.11 cargo-tarpaulin
fi
# Add clippy
before_script:
- export PATH=$PATH:~/.cargo/bin
script:
- cargo update
- cargo check --all --no-default-features
- |
if [[ "$TRAVIS_RUST_VERSION" == "stable" || "$TRAVIS_RUST_VERSION" == "beta" ]]; then
cargo test --all-features --all -- --nocapture
cd actix-http; cargo test --no-default-features --features="rustls" -- --nocapture; cd ..
cd awc; cargo test --no-default-features --features="rustls" -- --nocapture; cd ..
fi
# Upload docs
after_success:
- |
if [[ "$TRAVIS_OS_NAME" == "linux" && "$TRAVIS_PULL_REQUEST" = "false" && "$TRAVIS_BRANCH" == "master" && "$TRAVIS_RUST_VERSION" == "stable" ]]; then
cargo doc --no-deps --all-features &&
echo "<meta http-equiv=refresh content=0;url=os_balloon/index.html>" > target/doc/index.html &&
git clone https://github.com/davisp/ghp-import.git &&
./ghp-import/ghp_import.py -n -p -f -m "Documentation upload" -r https://"$GH_TOKEN"@github.com/"$TRAVIS_REPO_SLUG.git" target/doc &&
echo "Uploaded documentation"
fi
- |
if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-11-20" ]]; then
taskset -c 0 cargo tarpaulin --out Xml --all --all-features
bash <(curl -s https://codecov.io/bash)
echo "Uploaded code coverage"
fi

View File

@ -1,5 +1,44 @@
# Changes
## [3.0.0-alpha.2] - 2020-05-08
### Changed
* `{Resource,Scope}::default_service(f)` handlers now support app data extraction. [#1452]
* Implement `std::error::Error` for our custom errors [#1422]
* `NormalizePath` middleware now appends a trailing slash so that routes of the form `/example/` respond to `/example` requests.
* Remove the `failure` feature and support.
[#1422]: https://github.com/actix/actix-web/pull/1422
[#1452]: https://github.com/actix/actix-web/pull/1452
## [3.0.0-alpha.1] - 2020-03-11
### Added
* Add helper function for creating routes with `TRACE` method guard `web::trace()`
* Add convenience functions `test::read_body_json()` and `test::TestRequest::send_request()` for testing.
### Changed
* Use `sha-1` crate instead of unmaintained `sha1` crate
* Skip empty chunks when returning response from a `Stream` [#1308]
* Update the `time` dependency to 0.2.7
* Update `actix-tls` dependency to 2.0.0-alpha.1
* Update `rustls` dependency to 0.17
[#1308]: https://github.com/actix/actix-web/pull/1308
## [2.0.0] - 2019-12-25
### Changed
* Rename `HttpServer::start()` to `HttpServer::run()`
* Allow to gracefully stop test server via `TestServer::stop()`
* Allow to specify multi-patterns for resources
## [2.0.0-rc] - 2019-12-20
### Changed

View File

@ -1,6 +1,6 @@
[package]
name = "actix-web"
version = "2.0.0-rc"
version = "3.0.0-alpha.2"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix web is a simple, pragmatic and extremely fast web framework for Rust."
readme = "README.md"
@ -30,11 +30,11 @@ members = [
".",
"awc",
"actix-http",
"actix-cors",
# "actix-cors",
"actix-files",
"actix-framed",
"actix-session",
"actix-identity",
# "actix-session",
# "actix-identity",
"actix-multipart",
"actix-web-actors",
"actix-web-codegen",
@ -42,7 +42,7 @@ members = [
]
[features]
default = ["compress", "failure"]
default = ["compress"]
# content-encoding support
compress = ["actix-http/compress", "awc/compress"]
@ -50,29 +50,39 @@ compress = ["actix-http/compress", "awc/compress"]
# sessions feature, session require "ring" crate and c compiler
secure-cookies = ["actix-http/secure-cookies"]
failure = ["actix-http/failure"]
# openssl
openssl = ["actix-tls/openssl", "awc/openssl", "open-ssl"]
# rustls
rustls = ["actix-tls/rustls", "awc/rustls", "rust-tls"]
[[example]]
name = "basic"
required-features = ["compress"]
[[example]]
name = "uds"
required-features = ["compress"]
[[test]]
name = "test_server"
required-features = ["compress"]
[dependencies]
actix-codec = "0.2.0"
actix-service = "1.0.0"
actix-utils = "1.0.3"
actix-router = "0.2.0"
actix-service = "1.0.2"
actix-utils = "1.0.6"
actix-router = "0.2.4"
actix-rt = "1.0.0"
actix-server = "1.0.0"
actix-testing = "1.0.0"
actix-macros = "0.1.0"
actix-threadpool = "0.3.0"
actix-tls = "1.0.0"
actix-threadpool = "0.3.1"
actix-tls = "2.0.0-alpha.1"
actix-web-codegen = "0.2.0"
actix-http = "1.0.0"
awc = { version = "1.0.1", default-features = false }
actix-http = "2.0.0-alpha.3"
awc = { version = "2.0.0-alpha.1", default-features = false }
bytes = "0.5.3"
derive_more = "0.99.2"
@ -87,18 +97,19 @@ regex = "1.3"
serde = { version = "1.0", features=["derive"] }
serde_json = "1.0"
serde_urlencoded = "0.6.1"
time = "0.1.42"
time = { version = "0.2.7", default-features = false, features = ["std"] }
url = "2.1"
open-ssl = { version="0.10", package = "openssl", optional = true }
rust-tls = { version = "0.16.0", package = "rustls", optional = true }
rust-tls = { version = "0.17.0", package = "rustls", optional = true }
[dev-dependencies]
actix = "0.9.0-alpha.2"
actix = "0.10.0-alpha.1"
rand = "0.7"
env_logger = "0.6"
env_logger = "0.7"
serde_derive = "1.0"
brotli2 = "0.3.2"
flate2 = "1.0.13"
criterion = "0.3"
[profile.release]
lto = true
@ -110,9 +121,14 @@ actix-web = { path = "." }
actix-http = { path = "actix-http" }
actix-http-test = { path = "test-server" }
actix-web-codegen = { path = "actix-web-codegen" }
actix-cors = { path = "actix-cors" }
actix-identity = { path = "actix-identity" }
actix-session = { path = "actix-session" }
actix-files = { path = "actix-files" }
actix-multipart = { path = "actix-multipart" }
awc = { path = "awc" }
[[bench]]
name = "server"
harness = false
[[bench]]
name = "service"
harness = false

View File

@ -1,16 +1,58 @@
## Unreleased
* Setting a cookie's SameSite property, explicitly, to `SameSite::None` will now
result in `SameSite=None` being sent with the response Set-Cookie header.
To create a cookie without a SameSite attribute, remove any calls setting same_site.
* Support for actix Actors messages was moved to the actix-http crate and is enabled
with the `actors` feature
## 2.0.0
* `HttpServer::start()` renamed to `HttpServer::run()`. It is also possible to
`.await` the `run` method's result; in that case it awaits server exit.
* `App::register_data()` renamed to `App::app_data()` and accepts any type `T: 'static`.
Stored data is available via `HttpRequest::app_data()` method at runtime.
* Extractor configuration must be registered with `App::app_data()` instead of `App::data()`
* Sync handlers have been removed. The `.to_async()` method has been renamed to `.to()`;
replace `fn` with `async fn` to convert a sync handler to async.
* `TestServer::new()` renamed to `TestServer::start()`
* `actix_http_test::TestServer` moved to `actix_web::test` module. To start
test server use `test::start()` or `test::start_with_config()` methods
* `ResponseError` trait has been refactored. `ResponseError::error_response()` renders
http response.
* Feature `rust-tls` renamed to `rustls`
instead of
```toml
actix-web = { version = "2.0.0", features = ["rust-tls"] }
```
use
```toml
actix-web = { version = "2.0.0", features = ["rustls"] }
```
* Feature `ssl` renamed to `openssl`
instead of
```toml
actix-web = { version = "2.0.0", features = ["ssl"] }
```
use
```toml
actix-web = { version = "2.0.0", features = ["openssl"] }
```
* `Cors` builder now requires that you call `.finish()` to construct the middleware
## 1.0.1
@ -318,7 +360,7 @@
* `actix_web::server` module has been removed. To start http server use `actix_web::HttpServer` type
* StaticFiles and NamedFile has been move to separate create.
* StaticFiles and NamedFile have been moved to a separate crate.
instead of `use actix_web::fs::StaticFile`
@ -328,7 +370,7 @@
use `use actix_files::NamedFile`
* Multipart has been move to separate create.
* Multipart has been moved to a separate crate.
instead of `use actix_web::multipart::Multipart`

View File

@ -39,8 +39,24 @@ Actix web is a simple, pragmatic and extremely fast web framework for Rust.
* Includes an asynchronous [HTTP client](https://actix.rs/actix-web/actix_web/client/index.html)
* Supports [Actix actor framework](https://github.com/actix/actix)
## Docs
* [API documentation (master)](https://actix.rs/actix-web/actix_web)
* [API documentation (docs.rs)](https://docs.rs/actix-web)
* [User guide](https://actix.rs)
## Example
Dependencies:
```toml
[dependencies]
actix-web = "2"
actix-rt = "1"
```
Code:
```rust
use actix_web::{get, web, App, HttpServer, Responder};
@ -53,7 +69,7 @@ async fn index(info: web::Path<(u32, String)>) -> impl Responder {
async fn main() -> std::io::Result<()> {
HttpServer::new(|| App::new().service(index))
.bind("127.0.0.1:8080")?
.start()
.run()
.await
}
```
@ -64,11 +80,12 @@ async fn main() -> std::io::Result<()> {
* [Stateful](https://github.com/actix/examples/tree/master/state/)
* [Multipart streams](https://github.com/actix/examples/tree/master/multipart/)
* [Simple websocket](https://github.com/actix/examples/tree/master/websocket/)
* [Tera](https://github.com/actix/examples/tree/master/template_tera/) /
[Askama](https://github.com/actix/examples/tree/master/template_askama/) templates
* [Tera](https://github.com/actix/examples/tree/master/template_tera/)
* [Askama](https://github.com/actix/examples/tree/master/template_askama/) templates
* [Diesel integration](https://github.com/actix/examples/tree/master/diesel/)
* [r2d2](https://github.com/actix/examples/tree/master/r2d2/)
* [SSL / HTTP/2.0](https://github.com/actix/examples/tree/master/tls/)
* [OpenSSL](https://github.com/actix/examples/tree/master/openssl/)
* [Rustls](https://github.com/actix/examples/tree/master/rustls/)
* [Tcp/Websocket chat](https://github.com/actix/examples/tree/master/websocket-chat/)
* [Json](https://github.com/actix/examples/tree/master/json/)

View File

@ -18,7 +18,7 @@ path = "src/lib.rs"
[dependencies]
actix-web = "2.0.0-rc"
actix-service = "1.0.0"
actix-service = "1.0.1"
derive_more = "0.99.2"
futures = "0.3.1"

View File

@ -1,5 +1,7 @@
# Cors Middleware for actix web framework [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-cors)](https://crates.io/crates/actix-cors) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
**This crate moved to https://github.com/actix/actix-extras.**
## Documentation & community resources
* [User Guide](https://actix.rs/docs/)

View File

@ -814,7 +814,7 @@ where
res
}
}
.boxed_local(),
.boxed_local(),
)
}
}

View File

@ -18,9 +18,9 @@ name = "actix_files"
path = "src/lib.rs"
[dependencies]
actix-web = { version = "2.0.0-rc", default-features = false }
actix-http = "1.0.1"
actix-service = "1.0.0"
actix-web = { version = "3.0.0-alpha.2", default-features = false }
actix-http = "2.0.0-alpha.3"
actix-service = "1.0.1"
bitflags = "1"
bytes = "0.5.3"
futures = "0.3.1"
@ -33,4 +33,4 @@ v_htmlescape = "0.4"
[dev-dependencies]
actix-rt = "1.0.0"
actix-web = { version = "2.0.0-rc", features=["openssl"] }
actix-web = { version = "3.0.0-alpha.2", features = ["openssl"] }

View File

@ -5,6 +5,7 @@ use derive_more::Display;
#[derive(Display, Debug, PartialEq)]
pub enum FilesError {
/// Path is not a directory
#[allow(dead_code)]
#[display(fmt = "Path is not a directory. Unable to serve static files")]
IsNotDirectory,

View File

@ -178,7 +178,9 @@ fn directory_listing(
if dir.is_visible(&entry) {
let entry = entry.unwrap();
let p = match entry.path().strip_prefix(&dir.path) {
Ok(p) if cfg!(windows) => base.join(p).to_string_lossy().replace("\\", "/"),
Ok(p) if cfg!(windows) => {
base.join(p).to_string_lossy().replace("\\", "/")
}
Ok(p) => base.join(p).to_string_lossy().into_owned(),
Err(_) => continue,
};
@ -273,7 +275,7 @@ impl Files {
///
/// `File` uses `ThreadPool` for blocking filesystem operations.
/// By default pool with 5x threads of available cpus is used.
/// Pool size can be changed by setting ACTIX_CPU_POOL environment variable.
/// Pool size can be changed by setting ACTIX_THREADPOOL environment variable.
pub fn new<T: Into<PathBuf>>(path: &str, dir: T) -> Files {
let orig_dir = dir.into();
let dir = match orig_dir.canonicalize() {
@ -519,7 +521,7 @@ impl Service for FilesService {
Err(e) => return Either::Left(ok(req.error_response(e))),
};
// full filepath
// full file path
let path = match self.directory.join(&real_path.0).canonicalize() {
Ok(path) => path,
Err(e) => return self.handle_err(e, req),

View File

@ -13,7 +13,6 @@ categories = ["network-programming", "asynchronous",
"web-programming::websocket"]
license = "MIT/Apache-2.0"
edition = "2018"
workspace =".."
[lib]
name = "actix_framed"
@ -21,10 +20,10 @@ path = "src/lib.rs"
[dependencies]
actix-codec = "0.2.0"
actix-service = "1.0.0"
actix-router = "0.2.0"
actix-service = "1.0.1"
actix-router = "0.2.1"
actix-rt = "1.0.0"
actix-http = "1.0.0"
actix-http = "2.0.0-alpha.3"
bytes = "0.5.3"
futures = "0.3.1"
@ -33,6 +32,6 @@ log = "0.4"
[dev-dependencies]
actix-server = "1.0.0"
actix-connect = { version = "1.0.0", features=["openssl"] }
actix-connect = { version = "2.0.0-alpha.2", features=["openssl"] }
actix-http-test = { version = "1.0.0", features=["openssl"] }
actix-utils = "1.0.3"

View File

@ -1,5 +1,9 @@
# Changes
## [0.3.0] - 2019-12-25
* Migrate to actix-http 1.0
## [0.2.1] - 2019-07-20
* Remove unneeded actix-utils dependency

View File

@ -66,7 +66,7 @@ where
service
})
}
.boxed_local()
.boxed_local()
}
}

View File

@ -7,7 +7,7 @@ mod service;
mod state;
pub mod test;
// re-export for convinience
// re-export for convenience
pub use actix_http::{http, Error, HttpMessage, Response, ResponseError};
pub use self::app::{FramedApp, FramedAppService};

View File

@ -42,7 +42,7 @@ impl<Io, S> FramedRequest<Io, S> {
self.req.head()
}
/// This method returns muttable reference to the request head.
/// This method returns mutable reference to the request head.
/// panics if multiple references of http request exists.
#[inline]
pub fn head_mut(&mut self) -> &mut RequestHead {
@ -131,7 +131,7 @@ mod tests {
use super::*;
#[test]
fn test_reqest() {
fn test_request() {
let buf = TestBuffer::empty();
let framed = Framed::new(buf, Codec::default());
let req = TestRequest::with_uri("/index.html?q=1")

View File

@ -154,6 +154,6 @@ where
}
Ok(())
}
.boxed_local()
.boxed_local()
}
}

View File

@ -47,7 +47,8 @@ async fn test_simple() {
)
.finish(|_| future::ok::<_, Error>(Response::NotFound()))
.tcp()
});
})
.await;
assert!(srv.ws_at("/test").await.is_err());
@ -108,7 +109,8 @@ async fn test_service() {
.map_err(|_| ()),
),
)
});
})
.await;
// non ws request
let res = srv.get("/index.html").send().await.unwrap();

View File

@ -1,5 +1,51 @@
# Changes
## [2.0.0-alpha.3] - 2020-05-08
### Fixed
* Correct spelling of ConnectError::Unresolved [#1487]
* Fix a mistake in the encoding of websocket continuation messages wherein
Item::FirstText and Item::FirstBinary are each encoded as the other.
### Changed
* Implement `std::error::Error` for our custom errors [#1422]
* Remove `failure` support for `ResponseError` since that crate
will be deprecated in the near future.
[#1422]: https://github.com/actix/actix-web/pull/1422
[#1487]: https://github.com/actix/actix-web/pull/1487
## [2.0.0-alpha.2] - 2020-03-07
### Changed
* Update `actix-connect` and `actix-tls` dependency to 2.0.0-alpha.1. [#1395]
* Change default initial window size and connection window size for HTTP2 to 2MB and 1MB respectively
to improve download speed for awc when downloading large objects. [#1394]
* client::Connector accepts initial_window_size and initial_connection_window_size HTTP2 configuration. [#1394]
* client::Connector allows setting max_http_version to limit the HTTP version to be used. [#1394]
[#1394]: https://github.com/actix/actix-web/pull/1394
[#1395]: https://github.com/actix/actix-web/pull/1395
## [2.0.0-alpha.1] - 2020-02-27
### Changed
* Update the `time` dependency to 0.2.7.
* Moved actors messages support from actix crate, enabled with feature `actors`.
* Breaking change: trait MessageBody requires Unpin and accepts Pin<&mut Self> instead of &mut self in poll_next().
* MessageBody is not implemented for &'static [u8] anymore.
### Fixed
* Allow `SameSite=None` cookies to be sent in a response.
## [1.0.1] - 2019-12-20
### Fixed

View File

@ -1,6 +1,6 @@
[package]
name = "actix-http"
version = "1.0.1"
version = "2.0.0-alpha.3"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix http primitives"
readme = "README.md"
@ -15,7 +15,7 @@ license = "MIT/Apache-2.0"
edition = "2018"
[package.metadata.docs.rs]
features = ["openssl", "rustls", "failure", "compress", "secure-cookies"]
features = ["openssl", "rustls", "compress", "secure-cookies", "actors"]
[lib]
name = "actix_http"
@ -33,26 +33,26 @@ rustls = ["actix-tls/rustls", "actix-connect/rustls"]
# enable compression support
compress = ["flate2", "brotli2"]
# failure integration. actix does not use failure anymore
failure = ["fail-ure"]
# support for secure cookies
secure-cookies = ["ring"]
# support for actix Actor messages
actors = ["actix"]
[dependencies]
actix-service = "1.0.0"
actix-service = "1.0.5"
actix-codec = "0.2.0"
actix-connect = "1.0.1"
actix-utils = "1.0.3"
actix-connect = "2.0.0-alpha.3"
actix-utils = "1.0.6"
actix-rt = "1.0.0"
actix-threadpool = "0.3.1"
actix-tls = { version = "1.0.0", optional = true }
actix-tls = { version = "2.0.0-alpha.1", optional = true }
actix = { version = "0.10.0-alpha.1", optional = true }
base64 = "0.11"
bitflags = "1.2"
bytes = "0.5.3"
copyless = "0.1.4"
chrono = "0.4.6"
derive_more = "0.99.2"
either = "1.5.3"
encoding_rs = "0.8"
@ -74,10 +74,10 @@ rand = "0.7"
regex = "1.3"
serde = "1.0"
serde_json = "1.0"
sha1 = "0.6"
sha-1 = "0.8"
slab = "0.4"
serde_urlencoded = "0.6.1"
time = "0.1.42"
time = { version = "0.2.7", default-features = false, features = ["std"] }
# for secure cookie
ring = { version = "0.16.9", optional = true }
@ -86,16 +86,22 @@ ring = { version = "0.16.9", optional = true }
brotli2 = { version="0.3.2", optional = true }
flate2 = { version = "1.0.13", optional = true }
# optional deps
fail-ure = { version = "0.1.5", package="failure", optional = true }
[dev-dependencies]
actix-server = "1.0.0"
actix-connect = { version = "1.0.0", features=["openssl"] }
actix-server = "1.0.1"
actix-connect = { version = "2.0.0-alpha.2", features=["openssl"] }
actix-http-test = { version = "1.0.0", features=["openssl"] }
actix-tls = { version = "1.0.0", features=["openssl"] }
actix-tls = { version = "2.0.0-alpha.1", features=["openssl"] }
criterion = "0.3"
futures = "0.3.1"
env_logger = "0.6"
env_logger = "0.7"
serde_derive = "1.0"
open-ssl = { version="0.10", package = "openssl" }
rust-tls = { version="0.16", package = "rustls" }
rust-tls = { version="0.17", package = "rustls" }
[[bench]]
name = "content-length"
harness = false
[[bench]]
name = "status-line"
harness = false

View File

@ -14,19 +14,34 @@ Actix http
```rust
// see examples/framed_hello.rs for complete list of used crates.
extern crate actix_http;
use actix_http::{h1, Response, ServiceConfig};
use std::{env, io};
fn main() {
Server::new().bind("framed_hello", "127.0.0.1:8080", || {
IntoFramed::new(|| h1::Codec::new(ServiceConfig::default())) // <- create h1 codec
.and_then(TakeItem::new().map_err(|_| ())) // <- read one request
.and_then(|(_req, _framed): (_, Framed<_, _>)| { // <- send response and close conn
SendResponse::send(_framed, Response::Ok().body("Hello world!"))
.map_err(|_| ())
.map(|_| ())
})
}).unwrap().run();
use actix_http::{HttpService, Response};
use actix_server::Server;
use futures::future;
use http::header::HeaderValue;
use log::info;
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "hello_world=info");
env_logger::init();
Server::build()
.bind("hello-world", "127.0.0.1:8080", || {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
.finish(|_req| {
info!("{:?}", _req);
let mut res = Response::Ok();
res.header("x-head", HeaderValue::from_static("dummy value!"));
future::ok::<_, ()>(res.body("Hello world!"))
})
.tcp()
})?
.run()
.await
}
```

View File

@ -0,0 +1,267 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use bytes::BytesMut;
// benchmark sending all requests at the same time
fn bench_write_content_length(c: &mut Criterion) {
let mut group = c.benchmark_group("write_content_length");
let sizes = [
0, 1, 11, 83, 101, 653, 1001, 6323, 10001, 56329, 100001, 123456, 98724245,
4294967202,
];
for i in sizes.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_content_length(i, &mut b)
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_content_length(i, &mut b)
})
});
}
group.finish();
}
criterion_group!(benches, bench_write_content_length);
criterion_main!(benches);
mod _new {
use bytes::{BufMut, BytesMut};
const DIGITS_START: u8 = b'0';
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
if n == 0 {
bytes.put_slice(b"\r\ncontent-length: 0\r\n");
return;
}
bytes.put_slice(b"\r\ncontent-length: ");
if n < 10 {
bytes.put_u8(DIGITS_START + (n as u8));
} else if n < 100 {
let n = n as u8;
let d10 = n / 10;
let d1 = n % 10;
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1000 {
let n = n as u16;
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 10_000 {
let n = n as u16;
let d1000 = (n / 1000) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 100_000 {
let n = n as u32;
let d10000 = (n / 10000) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1_000_000 {
let n = n as u32;
let d100000 = (n / 100000) as u8;
let d10000 = ((n / 10000) % 10) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100000);
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else {
write_usize(n, bytes);
}
bytes.put_slice(b"\r\n");
}
fn write_usize(n: usize, bytes: &mut BytesMut) {
let mut n = n;
// 20 chars is max length of a usize (2^64)
// digits will be added to the buffer from lsd to msd
let mut buf = BytesMut::with_capacity(20);
while n > 9 {
// "pop" the least-significant digit
let lsd = (n % 10) as u8;
// remove the lsd from n
n = n / 10;
buf.put_u8(DIGITS_START + lsd);
}
// put msd to result buffer
bytes.put_u8(DIGITS_START + (n as u8));
// put, in reverse (msd to lsd), remaining digits to buffer
for i in (0..buf.len()).rev() {
bytes.put_u8(buf[i]);
}
}
}
mod _original {
use std::{mem, ptr, slice};
use bytes::{BufMut, BytesMut};
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
if n < 10 {
let mut buf: [u8; 21] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'\r', b'\n',
];
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else if n < 100 {
let mut buf: [u8; 22] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'\r', b'\n',
];
let d1 = n << 1;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(18),
2,
);
}
bytes.put_slice(&buf);
} else if n < 1000 {
let mut buf: [u8; 23] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l',
b'e', b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'0', b'\r',
b'\n',
];
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(19),
2,
)
};
// decode last 1
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
} else {
bytes.put_slice(b"\r\ncontent-length: ");
convert_usize(n, bytes);
}
}
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
let mut curr: isize = 39;
let mut buf: [u8; 41] = unsafe { mem::MaybeUninit::uninit().assume_init() };
buf[39] = b'\r';
buf[40] = b'\n';
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
// eagerly decode 4 characters at a time
while n >= 10_000 {
let rem = (n % 10_000) as isize;
n /= 10_000;
let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1;
curr -= 4;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
ptr::copy_nonoverlapping(
lut_ptr.offset(d2),
buf_ptr.offset(curr + 2),
2,
);
}
}
// if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
unsafe {
bytes.extend_from_slice(slice::from_raw_parts(
buf_ptr.offset(curr),
41 - curr as usize,
));
}
}
}
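
As a quick orientation for the benchmark above, here is a minimal sketch of how the safe writer is exercised; it assumes the `_new::write_content_length` helper defined in this file is in scope and only illustrates the header bytes it is expected to emit.

```rust
use bytes::BytesMut;

fn demo_content_length() {
    // reserve roughly enough space; put_slice grows the buffer if needed
    let mut buf = BytesMut::with_capacity(35);
    _new::write_content_length(1234, &mut buf);
    // the writer emits the surrounding CRLFs together with the header
    assert_eq!(&buf[..], &b"\r\ncontent-length: 1234\r\n"[..]);
}
```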

View File

@ -0,0 +1,222 @@
use criterion::{criterion_group, criterion_main, BenchmarkId, Criterion};
use bytes::BytesMut;
use http::Version;
const CODES: &[u16] = &[201, 303, 404, 515];
fn bench_write_status_line_11(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v1.1");
let version = Version::HTTP_11;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
fn bench_write_status_line_10(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v1.0");
let version = Version::HTTP_10;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
fn bench_write_status_line_09(c: &mut Criterion) {
let mut group = c.benchmark_group("write_status_line v0.9");
let version = Version::HTTP_09;
for i in CODES.iter() {
group.bench_with_input(BenchmarkId::new("Original (unsafe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_original::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("New (safe)", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_new::write_status_line(version, i, &mut b);
})
});
group.bench_with_input(BenchmarkId::new("Naive", i), i, |b, &i| {
b.iter(|| {
let mut b = BytesMut::with_capacity(35);
_naive::write_status_line(version, i, &mut b);
})
});
}
group.finish();
}
criterion_group!(
benches,
bench_write_status_line_11,
bench_write_status_line_10,
bench_write_status_line_09
);
criterion_main!(benches);
mod _naive {
use bytes::{BufMut, BytesMut};
use http::Version;
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
bytes.put_slice(n.to_string().as_bytes());
}
}
mod _new {
use bytes::{BufMut, BytesMut};
use http::Version;
const DIGITS_START: u8 = b'0';
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
bytes.put_u8(b' ');
}
}
mod _original {
use std::ptr;
use bytes::{BufMut, BytesMut};
use http::Version;
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
pub(crate) const STATUS_LINE_BUF_SIZE: usize = 13;
pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) {
let mut buf: [u8; STATUS_LINE_BUF_SIZE] = *b"HTTP/1.1 ";
match version {
Version::HTTP_2 => buf[5] = b'2',
Version::HTTP_10 => buf[7] = b'0',
Version::HTTP_09 => {
buf[5] = b'0';
buf[7] = b'9';
}
_ => (),
}
let mut curr: isize = 12;
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let four = n > 999;
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
}
}
bytes.put_slice(&buf);
if four {
bytes.put_u8(b' ');
}
}
}
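
For a sanity check on the safe variant above, a small hedged sketch of the bytes it should produce; `_new::write_status_line` refers to the module defined in this benchmark file.

```rust
use bytes::BytesMut;
use http::Version;

fn demo_status_line() {
    let mut buf = BytesMut::with_capacity(35);
    _new::write_status_line(Version::HTTP_11, 404, &mut buf);
    // version prefix, three status digits, then a trailing space before the reason phrase
    assert_eq!(&buf[..], &b"HTTP/1.1 404 "[..]);
}
```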

View File

@ -7,7 +7,8 @@ use futures::StreamExt;
use http::header::HeaderValue;
use log::info;
fn main() -> io::Result<()> {
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "echo=info");
env_logger::init();
@ -16,25 +17,21 @@ fn main() -> io::Result<()> {
HttpService::build()
.client_timeout(1000)
.client_disconnect(1000)
.finish(|mut req: Request| {
async move {
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?);
}
info!("request body: {:?}", body);
Ok::<_, Error>(
Response::Ok()
.header(
"x-head",
HeaderValue::from_static("dummy value!"),
)
.body(body),
)
.finish(|mut req: Request| async move {
let mut body = BytesMut::new();
while let Some(item) = req.payload().next().await {
body.extend_from_slice(&item?);
}
info!("request body: {:?}", body);
Ok::<_, Error>(
Response::Ok()
.header("x-head", HeaderValue::from_static("dummy value!"))
.body(body),
)
})
.tcp()
})?
.run()
.await
}

View File

@ -19,7 +19,8 @@ async fn handle_request(mut req: Request) -> Result<Response, Error> {
.body(body))
}
fn main() -> io::Result<()> {
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "echo=info");
env_logger::init();
@ -28,4 +29,5 @@ fn main() -> io::Result<()> {
HttpService::build().finish(handle_request).tcp()
})?
.run()
.await
}

View File

@ -6,7 +6,8 @@ use futures::future;
use http::header::HeaderValue;
use log::info;
fn main() -> io::Result<()> {
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "hello_world=info");
env_logger::init();
@ -24,4 +25,5 @@ fn main() -> io::Result<()> {
.tcp()
})?
.run()
.await
}

View File

@ -5,6 +5,7 @@ use std::{fmt, mem};
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use futures_util::ready;
use pin_project::{pin_project, project};
use crate::error::Error;
@ -35,33 +36,46 @@ impl BodySize {
pub trait MessageBody {
fn size(&self) -> BodySize;
fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>>;
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>>;
downcast_get_type_id!();
}
downcast!(MessageBody);
impl MessageBody for () {
fn size(&self) -> BodySize {
BodySize::Empty
}
fn poll_next(&mut self, _: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
Poll::Ready(None)
}
}
impl<T: MessageBody> MessageBody for Box<T> {
impl<T: MessageBody + Unpin> MessageBody for Box<T> {
fn size(&self) -> BodySize {
self.as_ref().size()
}
fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
self.as_mut().poll_next(cx)
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
Pin::new(self.get_mut().as_mut()).poll_next(cx)
}
}
#[pin_project]
pub enum ResponseBody<B> {
Body(B),
Other(Body),
Body(#[pin] B),
Other(#[pin] Body),
}
impl ResponseBody<Body> {
@ -97,10 +111,15 @@ impl<B: MessageBody> MessageBody for ResponseBody<B> {
}
}
fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
match self {
ResponseBody::Body(ref mut body) => body.poll_next(cx),
ResponseBody::Other(ref mut body) => body.poll_next(cx),
#[project]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
#[project]
match self.project() {
ResponseBody::Body(body) => body.poll_next(cx),
ResponseBody::Other(body) => body.poll_next(cx),
}
}
}
@ -115,12 +134,13 @@ impl<B: MessageBody> Stream for ResponseBody<B> {
) -> Poll<Option<Self::Item>> {
#[project]
match self.project() {
ResponseBody::Body(ref mut body) => body.poll_next(cx),
ResponseBody::Other(ref mut body) => body.poll_next(cx),
ResponseBody::Body(body) => body.poll_next(cx),
ResponseBody::Other(body) => body.poll_next(cx),
}
}
}
#[pin_project]
/// Represents various types of http message body.
pub enum Body {
/// Empty response. `Content-Length` header is not set.
@ -130,7 +150,7 @@ pub enum Body {
/// Specific response body.
Bytes(Bytes),
/// Generic message body.
Message(Box<dyn MessageBody>),
Message(Box<dyn MessageBody + Unpin>),
}
impl Body {
@ -140,7 +160,7 @@ impl Body {
}
/// Create body from generic message body.
pub fn from_message<B: MessageBody + 'static>(body: B) -> Body {
pub fn from_message<B: MessageBody + Unpin + 'static>(body: B) -> Body {
Body::Message(Box::new(body))
}
}
@ -155,8 +175,13 @@ impl MessageBody for Body {
}
}
fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
match self {
#[project]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
#[project]
match self.project() {
Body::None => Poll::Ready(None),
Body::Empty => Poll::Ready(None),
Body::Bytes(ref mut bin) => {
@ -167,7 +192,7 @@ impl MessageBody for Body {
Poll::Ready(Some(Ok(mem::replace(bin, Bytes::new()))))
}
}
Body::Message(ref mut body) => body.poll_next(cx),
Body::Message(ref mut body) => Pin::new(body.as_mut()).poll_next(cx),
}
}
}
@ -253,7 +278,7 @@ impl From<serde_json::Value> for Body {
impl<S> From<SizedStream<S>> for Body
where
S: Stream<Item = Result<Bytes, Error>> + 'static,
S: Stream<Item = Result<Bytes, Error>> + Unpin + 'static,
{
fn from(s: SizedStream<S>) -> Body {
Body::from_message(s)
@ -262,7 +287,7 @@ where
impl<S, E> From<BodyStream<S, E>> for Body
where
S: Stream<Item = Result<Bytes, E>> + 'static,
S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
E: Into<Error> + 'static,
{
fn from(s: BodyStream<S, E>) -> Body {
@ -275,11 +300,14 @@ impl MessageBody for Bytes {
BodySize::Sized(self.len())
}
fn poll_next(&mut self, _: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::replace(self, Bytes::new()))))
Poll::Ready(Some(Ok(mem::replace(self.get_mut(), Bytes::new()))))
}
}
}
@ -289,11 +317,16 @@ impl MessageBody for BytesMut {
BodySize::Sized(self.len())
}
fn poll_next(&mut self, _: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(mem::replace(self, BytesMut::new()).freeze())))
Poll::Ready(Some(Ok(
mem::replace(self.get_mut(), BytesMut::new()).freeze()
)))
}
}
}
@ -303,41 +336,36 @@ impl MessageBody for &'static str {
BodySize::Sized(self.len())
}
fn poll_next(&mut self, _: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(
mem::replace(self, "").as_ref(),
mem::replace(self.get_mut(), "").as_ref(),
))))
}
}
}
impl MessageBody for &'static [u8] {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self, _: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from_static(mem::replace(self, b"")))))
}
}
}
impl MessageBody for Vec<u8> {
fn size(&self) -> BodySize {
BodySize::Sized(self.len())
}
fn poll_next(&mut self, _: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(mem::replace(self, Vec::new())))))
Poll::Ready(Some(Ok(Bytes::from(mem::replace(
self.get_mut(),
Vec::new(),
)))))
}
}
}
@ -347,12 +375,15 @@ impl MessageBody for String {
BodySize::Sized(self.len())
}
fn poll_next(&mut self, _: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
fn poll_next(
self: Pin<&mut Self>,
_: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
if self.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(Bytes::from(
mem::replace(self, String::new()).into_bytes(),
mem::replace(self.get_mut(), String::new()).into_bytes(),
))))
}
}
@ -361,7 +392,7 @@ impl MessageBody for String {
/// Type represent streaming body.
/// Response does not contain `content-length` header and appropriate transfer encoding is used.
#[pin_project]
pub struct BodyStream<S, E> {
pub struct BodyStream<S: Unpin, E> {
#[pin]
stream: S,
_t: PhantomData<E>,
@ -369,7 +400,7 @@ pub struct BodyStream<S, E> {
impl<S, E> BodyStream<S, E>
where
S: Stream<Item = Result<Bytes, E>>,
S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>,
{
pub fn new(stream: S) -> Self {
@ -382,26 +413,37 @@ where
impl<S, E> MessageBody for BodyStream<S, E>
where
S: Stream<Item = Result<Bytes, E>>,
S: Stream<Item = Result<Bytes, E>> + Unpin,
E: Into<Error>,
{
fn size(&self) -> BodySize {
BodySize::Stream
}
fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
unsafe { Pin::new_unchecked(self) }
.project()
.stream
.poll_next(cx)
.map(|res| res.map(|res| res.map_err(std::convert::Into::into)))
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped to prevent [`BodyStream`]'s transmission being
/// ended on a zero-length chunk, but rather proceed until the underlying
/// [`Stream`] ends.
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream = self.project().stream;
loop {
let stream = stream.as_mut();
return Poll::Ready(match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
opt => opt.map(|res| res.map_err(Into::into)),
});
}
}
}
/// Type represent streaming body. This body implementation should be used
/// if total size of stream is known. Data get sent as is without using transfer encoding.
#[pin_project]
pub struct SizedStream<S> {
pub struct SizedStream<S: Unpin> {
size: u64,
#[pin]
stream: S,
@ -409,7 +451,7 @@ pub struct SizedStream<S> {
impl<S> SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>>,
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
pub fn new(size: u64, stream: S) -> Self {
SizedStream { size, stream }
@ -418,24 +460,38 @@ where
impl<S> MessageBody for SizedStream<S>
where
S: Stream<Item = Result<Bytes, Error>>,
S: Stream<Item = Result<Bytes, Error>> + Unpin,
{
fn size(&self) -> BodySize {
BodySize::Sized64(self.size)
}
fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
unsafe { Pin::new_unchecked(self) }
.project()
.stream
.poll_next(cx)
/// Attempts to pull out the next value of the underlying [`Stream`].
///
/// Empty values are skipped to prevent [`SizedStream`]'s transmission being
/// ended on a zero-length chunk, but rather proceed until the underlying
/// [`Stream`] ends.
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut stream: Pin<&mut S> = self.project().stream;
loop {
let stream = stream.as_mut();
return Poll::Ready(match ready!(stream.poll_next(cx)) {
Some(Ok(ref bytes)) if bytes.is_empty() => continue,
val => val,
});
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use futures::stream;
use futures_util::future::poll_fn;
use futures_util::pin_mut;
impl Body {
pub(crate) fn get_ref(&self) -> &[u8] {
@ -463,7 +519,10 @@ mod tests {
assert_eq!("test".size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| "test".poll_next(cx)).await.unwrap().ok(),
poll_fn(|cx| Pin::new(&mut "test").poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("test"))
);
}
@ -477,13 +536,12 @@ mod tests {
BodySize::Sized(4)
);
assert_eq!(Body::from_slice(b"test".as_ref()).get_ref(), b"test");
let sb = Bytes::from(&b"test"[..]);
pin_mut!(sb);
assert_eq!((&b"test"[..]).size(), BodySize::Sized(4));
assert_eq!(sb.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| (&b"test"[..]).poll_next(cx))
.await
.unwrap()
.ok(),
poll_fn(|cx| sb.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
@ -492,10 +550,12 @@ mod tests {
async fn test_vec() {
assert_eq!(Body::from(Vec::from("test")).size(), BodySize::Sized(4));
assert_eq!(Body::from(Vec::from("test")).get_ref(), b"test");
let test_vec = Vec::from("test");
pin_mut!(test_vec);
assert_eq!(Vec::from("test").size(), BodySize::Sized(4));
assert_eq!(test_vec.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| Vec::from("test").poll_next(cx))
poll_fn(|cx| test_vec.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
@ -505,41 +565,44 @@ mod tests {
#[actix_rt::test]
async fn test_bytes() {
let mut b = Bytes::from("test");
let b = Bytes::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.poll_next(cx)).await.unwrap().ok(),
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_bytes_mut() {
let mut b = BytesMut::from("test");
let b = BytesMut::from("test");
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.poll_next(cx)).await.unwrap().ok(),
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
#[actix_rt::test]
async fn test_string() {
let mut b = "test".to_owned();
let b = "test".to_owned();
assert_eq!(Body::from(b.clone()).size(), BodySize::Sized(4));
assert_eq!(Body::from(b.clone()).get_ref(), b"test");
assert_eq!(Body::from(&b).size(), BodySize::Sized(4));
assert_eq!(Body::from(&b).get_ref(), b"test");
pin_mut!(b);
assert_eq!(b.size(), BodySize::Sized(4));
assert_eq!(
poll_fn(|cx| b.poll_next(cx)).await.unwrap().ok(),
poll_fn(|cx| b.as_mut().poll_next(cx)).await.unwrap().ok(),
Some(Bytes::from("test"))
);
}
@ -547,14 +610,17 @@ mod tests {
#[actix_rt::test]
async fn test_unit() {
assert_eq!(().size(), BodySize::Empty);
assert!(poll_fn(|cx| ().poll_next(cx)).await.is_none());
assert!(poll_fn(|cx| Pin::new(&mut ()).poll_next(cx))
.await
.is_none());
}
#[actix_rt::test]
async fn test_box() {
let mut val = Box::new(());
let val = Box::new(());
pin_mut!(val);
assert_eq!(val.size(), BodySize::Empty);
assert!(poll_fn(|cx| val.poll_next(cx)).await.is_none());
assert!(poll_fn(|cx| val.as_mut().poll_next(cx)).await.is_none());
}
#[actix_rt::test]
@ -589,4 +655,97 @@ mod tests {
BodySize::Sized(25)
);
}
mod body_stream {
use super::*;
//use futures::task::noop_waker;
//use futures::stream::once;
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = BodyStream::new(stream::iter(
["1", "", "2"]
.iter()
.map(|&v| Ok(Bytes::from(v)) as Result<Bytes, ()>),
));
pin_mut!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
/* Now it does not compile as it should
#[actix_rt::test]
async fn move_pinned_pointer() {
let (sender, receiver) = futures::channel::oneshot::channel();
let mut body_stream = Ok(BodyStream::new(once(async {
let x = Box::new(0i32);
let y = &x;
receiver.await.unwrap();
let _z = **y;
Ok::<_, ()>(Bytes::new())
})));
let waker = noop_waker();
let mut context = Context::from_waker(&waker);
pin_mut!(body_stream);
let _ = body_stream.as_mut().unwrap().poll_next(&mut context);
sender.send(()).unwrap();
let _ = std::mem::replace(&mut body_stream, Err([0; 32])).unwrap().poll_next(&mut context);
}*/
}
mod sized_stream {
use super::*;
#[actix_rt::test]
async fn skips_empty_chunks() {
let body = SizedStream::new(
2,
stream::iter(["1", "", "2"].iter().map(|&v| Ok(Bytes::from(v)))),
);
pin_mut!(body);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("1")),
);
assert_eq!(
poll_fn(|cx| body.as_mut().poll_next(cx))
.await
.unwrap()
.ok(),
Some(Bytes::from("2")),
);
}
}
#[actix_rt::test]
async fn test_body_casting() {
let mut body = String::from("hello cast");
let resp_body: &mut dyn MessageBody = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push_str("!");
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
}
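
Because `poll_next` now takes `Pin<&mut Self>`, callers have to pin a body before polling it. A minimal caller-side sketch (not part of the diff) using `futures-util`'s `pin_mut!` and `poll_fn`, mirroring what the tests above do:

```rust
use actix_http::body::MessageBody;
use futures_util::{future::poll_fn, pin_mut};

async fn drain(body: impl MessageBody) {
    // poll_next takes Pin<&mut Self>, so pin the body on the stack first
    pin_mut!(body);
    while let Some(chunk) = poll_fn(|cx| body.as_mut().poll_next(cx)).await {
        match chunk {
            Ok(bytes) => println!("got {} bytes", bytes.len()),
            Err(err) => {
                eprintln!("body error: {}", err);
                break;
            }
        }
    }
}
```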

View File

@ -0,0 +1,39 @@
use std::time::Duration;
// These values are taken from hyper/src/proto/h2/client.rs
const DEFAULT_H2_CONN_WINDOW: u32 = 1024 * 1024 * 2; // 2mb
const DEFAULT_H2_STREAM_WINDOW: u32 = 1024 * 1024; // 1mb
/// Connector configuration
#[derive(Clone)]
pub(crate) struct ConnectorConfig {
pub(crate) timeout: Duration,
pub(crate) conn_lifetime: Duration,
pub(crate) conn_keep_alive: Duration,
pub(crate) disconnect_timeout: Option<Duration>,
pub(crate) limit: usize,
pub(crate) conn_window_size: u32,
pub(crate) stream_window_size: u32,
}
impl Default for ConnectorConfig {
fn default() -> Self {
Self {
timeout: Duration::from_secs(1),
conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15),
disconnect_timeout: Some(Duration::from_millis(3000)),
limit: 100,
conn_window_size: DEFAULT_H2_CONN_WINDOW,
stream_window_size: DEFAULT_H2_STREAM_WINDOW,
}
}
}
impl ConnectorConfig {
pub(crate) fn no_disconnect_timeout(&self) -> Self {
let mut res = self.clone();
res.disconnect_timeout = None;
res
}
}

View File

@ -11,6 +11,7 @@ use actix_service::{apply_fn, Service};
use actix_utils::timeout::{TimeoutError, TimeoutService};
use http::Uri;
use super::config::ConnectorConfig;
use super::connection::Connection;
use super::error::ConnectError;
use super::pool::{ConnectionPool, Protocol};
@ -48,11 +49,7 @@ type SslConnector = ();
/// ```
pub struct Connector<T, U> {
connector: T,
timeout: Duration,
conn_lifetime: Duration,
conn_keep_alive: Duration,
disconnect_timeout: Duration,
limit: usize,
config: ConnectorConfig,
#[allow(dead_code)]
ssl: SslConnector,
_t: PhantomData<U>,
@ -71,42 +68,47 @@ impl Connector<(), ()> {
> + Clone,
TcpStream,
> {
let ssl = {
#[cfg(feature = "openssl")]
{
use actix_connect::ssl::openssl::SslMethod;
let mut ssl = OpensslConnector::builder(SslMethod::tls()).unwrap();
let _ = ssl
.set_alpn_protos(b"\x02h2\x08http/1.1")
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
SslConnector::Openssl(ssl.build())
}
#[cfg(all(not(feature = "openssl"), feature = "rustls"))]
{
let protos = vec![b"h2".to_vec(), b"http/1.1".to_vec()];
let mut config = ClientConfig::new();
config.set_protocols(&protos);
config
.root_store
.add_server_trust_anchors(&actix_tls::rustls::TLS_SERVER_ROOTS);
SslConnector::Rustls(Arc::new(config))
}
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
{}
};
Connector {
ssl,
ssl: Self::build_ssl(vec![b"h2".to_vec(), b"http/1.1".to_vec()]),
connector: default_connector(),
timeout: Duration::from_secs(1),
conn_lifetime: Duration::from_secs(75),
conn_keep_alive: Duration::from_secs(15),
disconnect_timeout: Duration::from_millis(3000),
limit: 100,
config: ConnectorConfig::default(),
_t: PhantomData,
}
}
// Build Ssl connector with openssl, based on supplied alpn protocols
#[cfg(feature = "openssl")]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
use actix_connect::ssl::openssl::SslMethod;
use bytes::{BufMut, BytesMut};
let mut alpn = BytesMut::with_capacity(20);
for proto in protocols.iter() {
alpn.put_u8(proto.len() as u8);
alpn.put(proto.as_slice());
}
let mut ssl = OpensslConnector::builder(SslMethod::tls()).unwrap();
let _ = ssl
.set_alpn_protos(&alpn)
.map_err(|e| error!("Can not set alpn protocol: {:?}", e));
SslConnector::Openssl(ssl.build())
}
// Build Ssl connector with rustls, based on supplied alpn protocols
#[cfg(all(not(feature = "openssl"), feature = "rustls"))]
fn build_ssl(protocols: Vec<Vec<u8>>) -> SslConnector {
let mut config = ClientConfig::new();
config.set_protocols(&protocols);
config
.root_store
.add_server_trust_anchors(&actix_tls::rustls::TLS_SERVER_ROOTS);
SslConnector::Rustls(Arc::new(config))
}
// ssl turned off, provides empty ssl connector
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
fn build_ssl(_: Vec<Vec<u8>>) -> SslConnector {}
}
impl<T, U> Connector<T, U> {
@ -122,11 +124,7 @@ impl<T, U> Connector<T, U> {
{
Connector {
connector,
timeout: self.timeout,
conn_lifetime: self.conn_lifetime,
conn_keep_alive: self.conn_keep_alive,
disconnect_timeout: self.disconnect_timeout,
limit: self.limit,
config: self.config,
ssl: self.ssl,
_t: PhantomData,
}
@ -146,7 +144,7 @@ where
/// Connection timeout, i.e. max time to connect to remote host including dns name resolution.
/// Set to 1 second by default.
pub fn timeout(mut self, timeout: Duration) -> Self {
self.timeout = timeout;
self.config.timeout = timeout;
self
}
@ -163,12 +161,44 @@ where
self
}
/// Maximum supported http major version
/// Supported versions http/1.1, http/2
pub fn max_http_version(mut self, val: http::Version) -> Self {
let versions = match val {
http::Version::HTTP_11 => vec![b"http/1.1".to_vec()],
http::Version::HTTP_2 => vec![b"h2".to_vec(), b"http/1.1".to_vec()],
_ => {
unimplemented!("actix-http:client: supported versions http/1.1, http/2")
}
};
self.ssl = Connector::build_ssl(versions);
self
}
/// Indicates the initial window size (in octets) for
/// HTTP2 stream-level flow control for received data.
///
/// The default value is 65,535 and is good for APIs, but not for big objects.
pub fn initial_window_size(mut self, size: u32) -> Self {
self.config.stream_window_size = size;
self
}
/// Indicates the initial window size (in octets) for
/// HTTP2 connection-level flow control for received data.
///
/// The default value is 65,535 and is good for APIs, but not for big objects.
pub fn initial_connection_window_size(mut self, size: u32) -> Self {
self.config.conn_window_size = size;
self
}
/// Set total number of simultaneous connections per type of scheme.
///
/// If limit is 0, the connector has no limit.
/// The default limit size is 100.
pub fn limit(mut self, limit: usize) -> Self {
self.limit = limit;
self.config.limit = limit;
self
}
@ -179,7 +209,7 @@ where
/// exceeds this period, the connection is closed.
/// Default keep-alive period is 15 seconds.
pub fn conn_keep_alive(mut self, dur: Duration) -> Self {
self.conn_keep_alive = dur;
self.config.conn_keep_alive = dur;
self
}
@ -189,7 +219,7 @@ where
/// until it is closed regardless of keep-alive period.
/// Default lifetime period is 75 seconds.
pub fn conn_lifetime(mut self, dur: Duration) -> Self {
self.conn_lifetime = dur;
self.config.conn_lifetime = dur;
self
}
@ -202,7 +232,7 @@ where
///
/// By default disconnect timeout is set to 3000 milliseconds.
pub fn disconnect_timeout(mut self, dur: Duration) -> Self {
self.disconnect_timeout = dur;
self.config.disconnect_timeout = Some(dur);
self
}
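
Taken together, the builder methods above cover connect timeouts, pool limits, connection lifetimes, and the new HTTP/2 window sizes. A hedged usage sketch follows; the method names match the diff, while the `Connector::new()` constructor and `finish()` call are assumed from the surrounding client API and may differ between alpha releases.

```rust
use std::time::Duration;
use actix_http::client::Connector;

fn configured_connector() {
    let _svc = Connector::new()
        .timeout(Duration::from_secs(5))            // connect timeout, including DNS resolution
        .conn_keep_alive(Duration::from_secs(30))   // idle keep-alive per pooled connection
        .conn_lifetime(Duration::from_secs(120))    // hard upper bound on connection age
        .disconnect_timeout(Duration::from_millis(3000))
        .limit(50)                                  // max simultaneous connections per scheme
        .max_http_version(http::Version::HTTP_2)    // negotiate h2 + http/1.1 via ALPN
        .initial_window_size(1_048_576)             // HTTP/2 stream-level flow-control window
        .initial_connection_window_size(2_097_152)  // HTTP/2 connection-level window
        .finish();
}
```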
@ -216,7 +246,7 @@ where
#[cfg(not(any(feature = "openssl", feature = "rustls")))]
{
let connector = TimeoutService::new(
self.timeout,
self.config.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
@ -231,10 +261,7 @@ where
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
connector,
self.conn_lifetime,
self.conn_keep_alive,
None,
self.limit,
self.config.no_disconnect_timeout(),
),
}
}
@ -248,7 +275,7 @@ where
use actix_service::{boxed::service, pipeline};
let ssl_service = TimeoutService::new(
self.timeout,
self.config.timeout,
pipeline(
apply_fn(self.connector.clone(), |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
@ -301,7 +328,7 @@ where
});
let tcp_service = TimeoutService::new(
self.timeout,
self.config.timeout,
apply_fn(self.connector, |msg: Connect, srv| {
srv.call(TcpConnect::new(msg.uri).set_addr(msg.addr))
})
@ -316,18 +343,9 @@ where
connect_impl::InnerConnector {
tcp_pool: ConnectionPool::new(
tcp_service,
self.conn_lifetime,
self.conn_keep_alive,
None,
self.limit,
),
ssl_pool: ConnectionPool::new(
ssl_service,
self.conn_lifetime,
self.conn_keep_alive,
Some(self.disconnect_timeout),
self.limit,
self.config.no_disconnect_timeout(),
),
ssl_pool: ConnectionPool::new(ssl_service, self.config),
}
}
}

View File

@ -48,20 +48,22 @@ pub enum ConnectError {
/// Unresolved host name
#[display(fmt = "Connector received `Connect` method with unresolved host")]
Unresolverd,
Unresolved,
/// Connection io error
#[display(fmt = "{}", _0)]
Io(io::Error),
}
impl std::error::Error for ConnectError {}
impl From<actix_connect::ConnectError> for ConnectError {
fn from(err: actix_connect::ConnectError) -> ConnectError {
match err {
actix_connect::ConnectError::Resolver(e) => ConnectError::Resolver(e),
actix_connect::ConnectError::NoRecords => ConnectError::NoRecords,
actix_connect::ConnectError::InvalidInput => panic!(),
actix_connect::ConnectError::Unresolverd => ConnectError::Unresolverd,
actix_connect::ConnectError::Unresolved => ConnectError::Unresolved,
actix_connect::ConnectError::Io(e) => ConnectError::Io(e),
}
}
@ -86,6 +88,8 @@ pub enum InvalidUrl {
HttpError(http::Error),
}
impl std::error::Error for InvalidUrl {}
/// A set of errors that can occur during request sending and response reading
#[derive(Debug, Display, From)]
pub enum SendRequestError {
@ -115,6 +119,8 @@ pub enum SendRequestError {
Body(Error),
}
impl std::error::Error for SendRequestError {}
/// Convert `SendRequestError` to a server `Response`
impl ResponseError for SendRequestError {
fn status_code(&self) -> StatusCode {
@ -139,6 +145,8 @@ pub enum FreezeRequestError {
Http(HttpError),
}
impl std::error::Error for FreezeRequestError {}
impl From<FreezeRequestError> for SendRequestError {
fn from(e: FreezeRequestError) -> Self {
match e {

View File

@ -8,7 +8,7 @@ use bytes::buf::BufMutExt;
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use futures_util::future::poll_fn;
use futures_util::{SinkExt, StreamExt};
use futures_util::{pin_mut, SinkExt, StreamExt};
use crate::error::PayloadError;
use crate::h1;
@ -120,7 +120,7 @@ where
/// send request body to the peer
pub(crate) async fn send_body<I, B>(
mut body: B,
body: B,
framed: &mut Framed<I, h1::ClientCodec>,
) -> Result<(), SendRequestError>
where
@ -128,9 +128,10 @@ where
B: MessageBody,
{
let mut eof = false;
pin_mut!(body);
while !eof {
while !eof && !framed.is_write_buf_full() {
match poll_fn(|cx| body.poll_next(cx)).await {
match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
Some(result) => {
framed.write(h1::Message::Chunk(Some(result?)))?;
}

View File

@ -1,10 +1,15 @@
use std::convert::TryFrom;
use std::future::Future;
use std::time;
use actix_codec::{AsyncRead, AsyncWrite};
use bytes::Bytes;
use futures_util::future::poll_fn;
use h2::{client::SendRequest, SendStream};
use futures_util::pin_mut;
use h2::{
client::{Builder, Connection, SendRequest},
SendStream,
};
use http::header::{HeaderValue, CONNECTION, CONTENT_LENGTH, TRANSFER_ENCODING};
use http::{request::Request, Method, Version};
@ -13,6 +18,7 @@ use crate::header::HeaderMap;
use crate::message::{RequestHeadType, ResponseHead};
use crate::payload::Payload;
use super::config::ConnectorConfig;
use super::connection::{ConnectionType, IoConnection};
use super::error::SendRequestError;
use super::pool::Acquired;
@ -123,13 +129,14 @@ where
}
async fn send_body<B: MessageBody>(
mut body: B,
body: B,
mut send: SendStream<Bytes>,
) -> Result<(), SendRequestError> {
let mut buf = None;
pin_mut!(body);
loop {
if buf.is_none() {
match poll_fn(|cx| body.poll_next(cx)).await {
match poll_fn(|cx| body.as_mut().poll_next(cx)).await {
Some(Ok(b)) => {
send.reserve_capacity(b.len());
buf = Some(b);
@ -183,3 +190,18 @@ fn release<T: AsyncRead + AsyncWrite + Unpin + 'static>(
}
}
}
pub(crate) fn handshake<Io>(
io: Io,
config: &ConnectorConfig,
) -> impl Future<Output = Result<(SendRequest<Bytes>, Connection<Io, Bytes>), h2::Error>>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
let mut builder = Builder::new();
builder
.initial_window_size(config.stream_window_size)
.initial_connection_window_size(config.conn_window_size)
.enable_push(false);
builder.handshake(io)
}

View File

@ -1,6 +1,7 @@
//! Http client api
use http::Uri;
mod config;
mod connection;
mod connector;
mod error;

View File

@ -13,13 +13,16 @@ use actix_utils::{oneshot, task::LocalWaker};
use bytes::Bytes;
use futures_util::future::{poll_fn, FutureExt, LocalBoxFuture};
use fxhash::FxHashMap;
use h2::client::{handshake, Connection, SendRequest};
use h2::client::{Connection, SendRequest};
use http::uri::Authority;
use indexmap::IndexSet;
use pin_project::pin_project;
use slab::Slab;
use super::config::ConnectorConfig;
use super::connection::{ConnectionType, IoConnection};
use super::error::ConnectError;
use super::h2proto::handshake;
use super::Connect;
#[derive(Clone, Copy, PartialEq)]
@ -49,20 +52,11 @@ where
T: Service<Request = Connect, Response = (Io, Protocol), Error = ConnectError>
+ 'static,
{
pub(crate) fn new(
connector: T,
conn_lifetime: Duration,
conn_keep_alive: Duration,
disconnect_timeout: Option<Duration>,
limit: usize,
) -> Self {
pub(crate) fn new(connector: T, config: ConnectorConfig) -> Self {
ConnectionPool(
Rc::new(RefCell::new(connector)),
Rc::new(RefCell::new(Inner {
conn_lifetime,
conn_keep_alive,
disconnect_timeout,
limit,
config,
acquired: 0,
waiters: Slab::new(),
waiters_queue: IndexSet::new(),
@ -111,7 +105,7 @@ where
let key = if let Some(authority) = req.uri.authority() {
authority.clone().into()
} else {
return Err(ConnectError::Unresolverd);
return Err(ConnectError::Unresolved);
};
// acquire connection
@ -128,6 +122,8 @@ where
// open tcp connection
let (io, proto) = connector.call(req).await?;
let config = inner.borrow().config.clone();
let guard = OpenGuard::new(key, inner);
if proto == Protocol::Http1 {
@ -137,7 +133,7 @@ where
Some(guard.consume()),
))
} else {
let (snd, connection) = handshake(io).await?;
let (snd, connection) = handshake(io, &config).await?;
actix_rt::spawn(connection.map(|_| ()));
Ok(IoConnection::new(
ConnectionType::H2(snd),
@ -199,7 +195,7 @@ where
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release_waiter(&self.key, self.token);
inner.check_availibility();
inner.check_availability();
}
}
}
@ -236,7 +232,7 @@ where
if let Some(i) = self.inner.take() {
let mut inner = i.as_ref().borrow_mut();
inner.release();
inner.check_availibility();
inner.check_availability();
}
}
}
@ -254,10 +250,7 @@ struct AvailableConnection<Io> {
}
pub(crate) struct Inner<Io> {
conn_lifetime: Duration,
conn_keep_alive: Duration,
disconnect_timeout: Option<Duration>,
limit: usize,
config: ConnectorConfig,
acquired: usize,
available: FxHashMap<Key, VecDeque<AvailableConnection<Io>>>,
waiters: Slab<
@ -310,7 +303,7 @@ where
fn acquire(&mut self, key: &Key, cx: &mut Context<'_>) -> Acquire<Io> {
// check limits
if self.limit > 0 && self.acquired >= self.limit {
if self.config.limit > 0 && self.acquired >= self.config.limit {
return Acquire::NotAvailable;
}
@ -322,10 +315,10 @@ where
let now = Instant::now();
while let Some(conn) = connections.pop_back() {
// check if it still usable
if (now - conn.used) > self.conn_keep_alive
|| (now - conn.created) > self.conn_lifetime
if (now - conn.used) > self.config.conn_keep_alive
|| (now - conn.created) > self.config.conn_lifetime
{
if let Some(timeout) = self.disconnect_timeout {
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = conn.io {
actix_rt::spawn(CloseConnection::new(io, timeout))
}
@ -337,7 +330,7 @@ where
match Pin::new(s).poll_read(cx, &mut buf) {
Poll::Pending => (),
Poll::Ready(Ok(n)) if n > 0 => {
if let Some(timeout) = self.disconnect_timeout {
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = io {
actix_rt::spawn(CloseConnection::new(
io, timeout,
@ -366,21 +359,21 @@ where
created,
used: Instant::now(),
});
self.check_availibility();
self.check_availability();
}
fn release_close(&mut self, io: ConnectionType<Io>) {
self.acquired -= 1;
if let Some(timeout) = self.disconnect_timeout {
if let Some(timeout) = self.config.disconnect_timeout {
if let ConnectionType::H1(io) = io {
actix_rt::spawn(CloseConnection::new(io, timeout))
}
}
self.check_availibility();
self.check_availability();
}
fn check_availibility(&self) {
if !self.waiters_queue.is_empty() && self.acquired < self.limit {
fn check_availability(&self) {
if !self.waiters_queue.is_empty() && self.acquired < self.config.limit {
self.waker.wake();
}
}
@ -422,6 +415,7 @@ where
}
}
#[pin_project]
struct ConnectorPoolSupport<T, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
@ -439,7 +433,7 @@ where
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = unsafe { self.get_unchecked_mut() };
let this = self.project();
let mut inner = this.inner.as_ref().borrow_mut();
inner.waker.register(cx.waker());
@ -478,6 +472,7 @@ where
tx,
this.inner.clone(),
this.connector.call(connect),
inner.config.clone(),
);
}
}
@ -488,10 +483,12 @@ where
}
}
#[pin_project::pin_project(PinnedDrop)]
struct OpenWaitingConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
#[pin]
fut: F,
key: Key,
h2: Option<
@ -502,6 +499,7 @@ where
>,
rx: Option<oneshot::Sender<Result<IoConnection<Io>, ConnectError>>>,
inner: Option<Rc<RefCell<Inner<Io>>>>,
config: ConnectorConfig,
}
impl<F, Io> OpenWaitingConnection<F, Io>
@ -514,6 +512,7 @@ where
rx: oneshot::Sender<Result<IoConnection<Io>, ConnectError>>,
inner: Rc<RefCell<Inner<Io>>>,
fut: F,
config: ConnectorConfig,
) {
actix_rt::spawn(OpenWaitingConnection {
key,
@ -521,19 +520,21 @@ where
h2: None,
rx: Some(rx),
inner: Some(inner),
config,
})
}
}
impl<F, Io> Drop for OpenWaitingConnection<F, Io>
#[pin_project::pinned_drop]
impl<F, Io> PinnedDrop for OpenWaitingConnection<F, Io>
where
Io: AsyncRead + AsyncWrite + Unpin + 'static,
{
fn drop(&mut self) {
if let Some(inner) = self.inner.take() {
fn drop(self: Pin<&mut Self>) {
if let Some(inner) = self.project().inner.take() {
let mut inner = inner.as_ref().borrow_mut();
inner.release();
inner.check_availibility();
inner.check_availability();
}
}
}
@ -545,8 +546,8 @@ where
{
type Output = ();
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = unsafe { self.get_unchecked_mut() };
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.as_mut().project();
if let Some(ref mut h2) = this.h2 {
return match Pin::new(h2).poll(cx) {
@ -571,7 +572,7 @@ where
};
}
match unsafe { Pin::new_unchecked(&mut this.fut) }.poll(cx) {
match this.fut.poll(cx) {
Poll::Ready(Err(err)) => {
let _ = this.inner.take();
if let Some(rx) = this.rx.take() {
@ -589,8 +590,8 @@ where
)));
Poll::Ready(())
} else {
this.h2 = Some(handshake(io).boxed_local());
unsafe { Pin::new_unchecked(this) }.poll(cx)
*this.h2 = Some(handshake(io, this.config).boxed_local());
self.poll(cx)
}
}
Poll::Pending => Poll::Pending,

View File

@ -1,4 +1,4 @@
use std::cell::UnsafeCell;
use std::cell::RefCell;
use std::rc::Rc;
use std::task::{Context, Poll};
@ -6,37 +6,35 @@ use actix_service::Service;
#[doc(hidden)]
/// Service that allows to turn non-clone service to a service with `Clone` impl
pub(crate) struct CloneableService<T>(Rc<UnsafeCell<T>>);
///
/// # Panics
/// CloneableService might panic with some creative use of thread local storage.
/// See https://github.com/actix/actix-web/issues/1295 for example
pub(crate) struct CloneableService<T: Service>(Rc<RefCell<T>>);
impl<T> CloneableService<T> {
pub(crate) fn new(service: T) -> Self
where
T: Service,
{
Self(Rc::new(UnsafeCell::new(service)))
impl<T: Service> CloneableService<T> {
pub(crate) fn new(service: T) -> Self {
Self(Rc::new(RefCell::new(service)))
}
}
impl<T> Clone for CloneableService<T> {
impl<T: Service> Clone for CloneableService<T> {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl<T> Service for CloneableService<T>
where
T: Service,
{
impl<T: Service> Service for CloneableService<T> {
type Request = T::Request;
type Response = T::Response;
type Error = T::Error;
type Future = T::Future;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
unsafe { &mut *self.0.as_ref().get() }.poll_ready(cx)
self.0.borrow_mut().poll_ready(cx)
}
fn call(&mut self, req: T::Request) -> Self::Future {
unsafe { &mut *self.0.as_ref().get() }.call(req)
self.0.borrow_mut().call(req)
}
}
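
The switch from `UnsafeCell` to `RefCell` above trades raw pointer access for runtime-checked borrows. A tiny standalone sketch of the same sharing pattern (plain Rust, not the actix service types):

```rust
use std::cell::RefCell;
use std::rc::Rc;

fn main() {
    let shared = Rc::new(RefCell::new(0u32));
    let handle = Rc::clone(&shared);

    // each clone of the wrapper shares the same inner value;
    // borrow_mut() panics on re-entrant borrows instead of causing UB
    *handle.borrow_mut() += 1;
    assert_eq!(*shared.borrow(), 1);
}
```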

View File

@ -1,4 +1,4 @@
use std::cell::UnsafeCell;
use std::cell::Cell;
use std::fmt::Write;
use std::rc::Rc;
use std::time::Duration;
@ -7,7 +7,7 @@ use std::{fmt, net};
use actix_rt::time::{delay_for, delay_until, Delay, Instant};
use bytes::BytesMut;
use futures_util::{future, FutureExt};
use time;
use time::OffsetDateTime;
// "Sun, 06 Nov 1994 08:49:37 GMT".len()
const DATE_VALUE_LENGTH: usize = 29;
@ -114,7 +114,7 @@ impl ServiceConfig {
}
#[inline]
/// Return state of connection keep-alive funcitonality
/// Return state of connection keep-alive functionality
pub fn keep_alive_enabled(&self) -> bool {
self.0.ka_enabled
}
@ -211,7 +211,12 @@ impl Date {
}
fn update(&mut self) {
self.pos = 0;
write!(self, "{}", time::at_utc(time::get_time()).rfc822()).unwrap();
write!(
self,
"{}",
OffsetDateTime::now_utc().format("%a, %d %b %Y %H:%M:%S GMT")
)
.unwrap();
}
}
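
The `update` method above now formats the cached date with `time` 0.2 instead of the deprecated `time::at_utc(..).rfc822()`. A quick hedged check, assuming `time` 0.2 with its `std` feature, that the format string still yields the fixed 29-byte HTTP date:

```rust
use time::OffsetDateTime;

fn main() {
    let date = OffsetDateTime::now_utc().format("%a, %d %b %Y %H:%M:%S GMT");
    // e.g. "Sun, 06 Nov 1994 08:49:37 GMT" -- always DATE_VALUE_LENGTH (29) bytes
    assert_eq!(date.len(), 29);
}
```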
@ -228,24 +233,24 @@ impl fmt::Write for Date {
struct DateService(Rc<DateServiceInner>);
struct DateServiceInner {
current: UnsafeCell<Option<(Date, Instant)>>,
current: Cell<Option<(Date, Instant)>>,
}
impl DateServiceInner {
fn new() -> Self {
DateServiceInner {
current: UnsafeCell::new(None),
current: Cell::new(None),
}
}
fn reset(&self) {
unsafe { (&mut *self.current.get()).take() };
self.current.take();
}
fn update(&self) {
let now = Instant::now();
let date = Date::new();
*(unsafe { &mut *self.current.get() }) = Some((date, now));
self.current.set(Some((date, now)));
}
}
@ -255,7 +260,7 @@ impl DateService {
}
fn check_date(&self) {
if unsafe { (&*self.0.current.get()).is_none() } {
if self.0.current.get().is_none() {
self.0.update();
// periodic date update
@ -269,12 +274,12 @@ impl DateService {
fn now(&self) -> Instant {
self.check_date();
unsafe { (&*self.0.current.get()).as_ref().unwrap().1 }
self.0.current.get().unwrap().1
}
fn set_date<F: FnMut(&Date)>(&self, mut f: F) {
self.check_date();
f(&unsafe { (&*self.0.current.get()).as_ref().unwrap().0 })
f(&self.0.current.get().unwrap().0);
}
}
@ -282,6 +287,16 @@ impl DateService {
mod tests {
use super::*;
// Test modifying the date from within the closure
// passed to `set_date`
#[test]
fn test_evil_date() {
let service = DateService::new();
// Make sure that `check_date` doesn't try to spawn a task
service.0.update();
service.set_date(|_| service.0.reset());
}
#[test]
fn test_date_len() {
assert_eq!(DATE_VALUE_LENGTH, "Sun, 06 Nov 1994 08:49:37 GMT".len());

View File

@ -1,7 +1,6 @@
use std::borrow::Cow;
use chrono::Duration;
use time::Tm;
use time::{Duration, OffsetDateTime};
use super::{Cookie, SameSite};
@ -64,13 +63,13 @@ impl CookieBuilder {
/// use actix_http::cookie::Cookie;
///
/// let c = Cookie::build("foo", "bar")
/// .expires(time::now())
/// .expires(time::OffsetDateTime::now_utc())
/// .finish();
///
/// assert!(c.expires().is_some());
/// ```
#[inline]
pub fn expires(mut self, when: Tm) -> CookieBuilder {
pub fn expires(mut self, when: OffsetDateTime) -> CookieBuilder {
self.cookie.set_expires(when);
self
}
@ -108,7 +107,10 @@ impl CookieBuilder {
/// ```
#[inline]
pub fn max_age_time(mut self, value: Duration) -> CookieBuilder {
self.cookie.set_max_age(value);
// Truncate any nanoseconds from the Duration, as they aren't represented within `Max-Age`
// and would cause two otherwise identical `Cookie` instances to not be equivalent to one another.
self.cookie
.set_max_age(Duration::seconds(value.whole_seconds()));
self
}
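
The truncation above means sub-second precision is discarded before the value is stored. A small hedged illustration, assuming the `max_age_time` builder method shown in this hunk:

```rust
use actix_http::cookie::Cookie;
use time::Duration;

fn main() {
    let precise = Duration::seconds(30) + Duration::nanoseconds(500);
    let cookie = Cookie::build("id", "1").max_age_time(precise).finish();

    // nanoseconds are dropped, so only whole seconds survive in Max-Age
    assert_eq!(cookie.max_age(), Some(Duration::seconds(30)));
}
```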
@ -212,7 +214,7 @@ impl CookieBuilder {
///
/// ```rust
/// use actix_http::cookie::Cookie;
/// use chrono::Duration;
/// use time::Duration;
///
/// let c = Cookie::build("foo", "bar")
/// .permanent()

View File

@ -10,18 +10,26 @@ use std::fmt;
/// attribute is "Strict", then the cookie is never sent in cross-site requests.
/// If the `SameSite` attribute is "Lax", the cookie is only sent in cross-site
/// requests with "safe" HTTP methods, i.e, `GET`, `HEAD`, `OPTIONS`, `TRACE`.
/// If the `SameSite` attribute is not present (made explicit via the
/// `SameSite::None` variant), then the cookie will be sent as normal.
/// If the `SameSite` attribute is not present then the cookie will be sent as
/// normal. In some browsers, this will implicitly handle the cookie as if "Lax"
/// and in others, "None". It's best to explicitly set the `SameSite` attribute
/// to avoid inconsistent behavior.
///
/// **Note:** Depending on browser, the `Secure` attribute may be required for
/// `SameSite` "None" cookies to be accepted.
///
/// **Note:** This cookie attribute is an HTTP draft! Its meaning and definition
/// are subject to change.
///
/// More info about these draft changes can be found in the draft spec:
/// - https://tools.ietf.org/html/draft-west-cookie-incrementalism-00
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum SameSite {
/// The "Strict" `SameSite` attribute.
Strict,
/// The "Lax" `SameSite` attribute.
Lax,
/// No `SameSite` attribute.
/// The "None" `SameSite` attribute.
None,
}
@ -92,7 +100,7 @@ impl fmt::Display for SameSite {
match *self {
SameSite::Strict => write!(f, "Strict"),
SameSite::Lax => write!(f, "Lax"),
SameSite::None => Ok(()),
SameSite::None => write!(f, "None"),
}
}
}
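
Given the draft caveats above, a short hedged example of setting the attribute explicitly; note that after this change `SameSite::None` is written out rather than omitted from the serialized cookie.

```rust
use actix_http::cookie::{Cookie, SameSite};

fn main() {
    let cookie = Cookie::build("session", "abc123")
        .secure(true) // several browsers only accept SameSite=None together with Secure
        .same_site(SameSite::None)
        .finish();

    // previously this rendered without any SameSite attribute at all
    assert!(cookie.to_string().contains("SameSite=None"));
}
```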

View File

@ -1,7 +1,7 @@
use std::collections::HashSet;
use std::mem::replace;
use chrono::Duration;
use time::{Duration, OffsetDateTime};
use super::delta::DeltaCookie;
use super::Cookie;
@ -13,7 +13,7 @@ use super::secure::{Key, PrivateJar, SignedJar};
///
/// A `CookieJar` provides storage for any number of cookies. Any changes made
/// to the jar are tracked; the changes can be retrieved via the
/// [delta](#method.delta) method which returns an interator over the changes.
/// [delta](#method.delta) method which returns an iterator over the changes.
///
/// # Usage
///
@ -188,7 +188,7 @@ impl CookieJar {
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
/// use chrono::Duration;
/// use time::Duration;
///
/// let mut jar = CookieJar::new();
///
@ -202,7 +202,7 @@ impl CookieJar {
/// let delta: Vec<_> = jar.delta().collect();
/// assert_eq!(delta.len(), 1);
/// assert_eq!(delta[0].name(), "name");
/// assert_eq!(delta[0].max_age(), Some(Duration::seconds(0)));
/// assert_eq!(delta[0].max_age(), Some(Duration::zero()));
/// ```
///
/// Removing a new cookie does not result in a _removal_ cookie:
@ -220,8 +220,8 @@ impl CookieJar {
pub fn remove(&mut self, mut cookie: Cookie<'static>) {
if self.original_cookies.contains(cookie.name()) {
cookie.set_value("");
cookie.set_max_age(Duration::seconds(0));
cookie.set_expires(time::now() - Duration::days(365));
cookie.set_max_age(Duration::zero());
cookie.set_expires(OffsetDateTime::now_utc() - Duration::days(365));
self.delta_cookies.replace(DeltaCookie::removed(cookie));
} else {
self.delta_cookies.remove(cookie.name());
@ -239,7 +239,7 @@ impl CookieJar {
///
/// ```rust
/// use actix_http::cookie::{CookieJar, Cookie};
/// use chrono::Duration;
/// use time::Duration;
///
/// let mut jar = CookieJar::new();
///
@ -533,8 +533,8 @@ mod test {
#[test]
#[cfg(feature = "secure-cookies")]
fn delta() {
use chrono::Duration;
use std::collections::HashMap;
use time::Duration;
let mut c = CookieJar::new();
@ -556,7 +556,7 @@ mod test {
assert!(names.get("test2").unwrap().is_none());
assert!(names.get("test3").unwrap().is_none());
assert!(names.get("test4").unwrap().is_none());
assert_eq!(names.get("original").unwrap(), &Some(Duration::seconds(0)));
assert_eq!(names.get("original").unwrap(), &Some(Duration::zero()));
}
#[test]

View File

@ -47,7 +47,7 @@
//! ```
#![doc(html_root_url = "https://docs.rs/cookie/0.11")]
#![deny(missing_docs)]
#![warn(missing_docs)]
mod builder;
mod delta;
@ -65,9 +65,8 @@ use std::borrow::Cow;
use std::fmt;
use std::str::FromStr;
use chrono::Duration;
use percent_encoding::{percent_encode, AsciiSet, CONTROLS};
use time::Tm;
use time::{Duration, OffsetDateTime};
pub use self::builder::CookieBuilder;
pub use self::draft::*;
@ -104,7 +103,7 @@ enum CookieStr {
impl CookieStr {
/// Retrieves the string `self` corresponds to. If `self` is derived from
/// indexes, the corresponding subslice of `string` is returned. Otherwise,
/// indexes, the corresponding sub-slice of `string` is returned. Otherwise,
/// the concrete string is returned.
///
/// # Panics
@ -172,7 +171,7 @@ pub struct Cookie<'c> {
/// The cookie's value.
value: CookieStr,
/// The cookie's expiration, if any.
expires: Option<Tm>,
expires: Option<OffsetDateTime>,
/// The cookie's maximum age, if any.
max_age: Option<Duration>,
/// The cookie's domain, if any.
@ -479,7 +478,7 @@ impl<'c> Cookie<'c> {
/// assert_eq!(c.max_age(), None);
///
/// let c = Cookie::parse("name=value; Max-Age=3600").unwrap();
/// assert_eq!(c.max_age().map(|age| age.num_hours()), Some(1));
/// assert_eq!(c.max_age().map(|age| age.whole_hours()), Some(1));
/// ```
#[inline]
pub fn max_age(&self) -> Option<Duration> {
@ -544,10 +543,10 @@ impl<'c> Cookie<'c> {
/// let expire_time = "Wed, 21 Oct 2017 07:28:00 GMT";
/// let cookie_str = format!("name=value; Expires={}", expire_time);
/// let c = Cookie::parse(cookie_str).unwrap();
/// assert_eq!(c.expires().map(|t| t.tm_year), Some(117));
/// assert_eq!(c.expires().map(|t| t.year()), Some(2017));
/// ```
#[inline]
pub fn expires(&self) -> Option<Tm> {
pub fn expires(&self) -> Option<OffsetDateTime> {
self.expires
}
@ -645,7 +644,7 @@ impl<'c> Cookie<'c> {
///
/// ```rust
/// use actix_http::cookie::Cookie;
/// use chrono::Duration;
/// use time::Duration;
///
/// let mut c = Cookie::new("name", "value");
/// assert_eq!(c.max_age(), None);
@ -698,18 +697,19 @@ impl<'c> Cookie<'c> {
///
/// ```rust
/// use actix_http::cookie::Cookie;
/// use time::{Duration, OffsetDateTime};
///
/// let mut c = Cookie::new("name", "value");
/// assert_eq!(c.expires(), None);
///
/// let mut now = time::now();
/// now.tm_year += 1;
/// let mut now = OffsetDateTime::now();
/// now += Duration::week();
///
/// c.set_expires(now);
/// assert!(c.expires().is_some())
/// ```
#[inline]
pub fn set_expires(&mut self, time: Tm) {
pub fn set_expires(&mut self, time: OffsetDateTime) {
self.expires = Some(time);
}
@ -720,7 +720,7 @@ impl<'c> Cookie<'c> {
///
/// ```rust
/// use actix_http::cookie::Cookie;
/// use chrono::Duration;
/// use time::Duration;
///
/// let mut c = Cookie::new("foo", "bar");
/// assert!(c.expires().is_none());
@ -733,7 +733,7 @@ impl<'c> Cookie<'c> {
pub fn make_permanent(&mut self) {
let twenty_years = Duration::days(365 * 20);
self.set_max_age(twenty_years);
self.set_expires(time::now() + twenty_years);
self.set_expires(OffsetDateTime::now_utc() + twenty_years);
}
fn fmt_parameters(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@ -746,9 +746,7 @@ impl<'c> Cookie<'c> {
}
if let Some(same_site) = self.same_site() {
if !same_site.is_none() {
write!(f, "; SameSite={}", same_site)?;
}
write!(f, "; SameSite={}", same_site)?;
}
if let Some(path) = self.path() {
@ -760,11 +758,11 @@ impl<'c> Cookie<'c> {
}
if let Some(max_age) = self.max_age() {
write!(f, "; Max-Age={}", max_age.num_seconds())?;
write!(f, "; Max-Age={}", max_age.whole_seconds())?;
}
if let Some(time) = self.expires() {
write!(f, "; Expires={}", time.rfc822())?;
write!(f, "; Expires={}", time.format("%a, %d %b %Y %H:%M:%S GMT"))?;
}
Ok(())
@ -992,7 +990,7 @@ impl<'a, 'b> PartialEq<Cookie<'b>> for Cookie<'a> {
#[cfg(test)]
mod tests {
use super::{Cookie, SameSite};
use time::strptime;
use time::PrimitiveDateTime;
#[test]
fn format() {
@ -1017,7 +1015,9 @@ mod tests {
assert_eq!(&cookie.to_string(), "foo=bar; Domain=www.rust-lang.org");
let time_str = "Wed, 21 Oct 2015 07:28:00 GMT";
let expires = strptime(time_str, "%a, %d %b %Y %H:%M:%S %Z").unwrap();
let expires = PrimitiveDateTime::parse(time_str, "%a, %d %b %Y %H:%M:%S")
.unwrap()
.assume_utc();
let cookie = Cookie::build("foo", "bar").expires(expires).finish();
assert_eq!(
&cookie.to_string(),
@ -1037,7 +1037,7 @@ mod tests {
let cookie = Cookie::build("foo", "bar")
.same_site(SameSite::None)
.finish();
assert_eq!(&cookie.to_string(), "foo=bar");
assert_eq!(&cookie.to_string(), "foo=bar; SameSite=None");
}
#[test]
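// Editor's note: a minimal usage sketch of the cookie API after the chrono -> time migration
// shown above; the cookie name/value are illustrative, and only methods exercised in this
// diff (Cookie::new, set_max_age, set_expires, max_age) are used.
use actix_http::cookie::Cookie;
use time::{Duration, OffsetDateTime};

let mut c = Cookie::new("session", "abc123");
c.set_max_age(Duration::hours(1));
c.set_expires(OffsetDateTime::now_utc() + Duration::hours(1));
// time::Duration exposes whole_* accessors instead of chrono's num_* methods.
assert_eq!(c.max_age().map(|age| age.whole_hours()), Some(1));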

View File

@ -5,11 +5,13 @@ use std::error::Error;
use std::fmt;
use std::str::Utf8Error;
use chrono::Duration;
use percent_encoding::percent_decode;
use time::Duration;
use super::{Cookie, CookieStr, SameSite};
use crate::time_parser;
/// Enum corresponding to a parsing error.
#[derive(Debug, PartialEq, Eq, Clone, Copy)]
pub enum ParseError {
@ -147,7 +149,7 @@ fn parse_inner<'c>(s: &str, decode: bool) -> Result<Cookie<'c>, ParseError> {
Ok(val) => {
// Don't panic if the max age seconds is greater than what's supported by
// `Duration`.
let val = cmp::min(val, Duration::max_value().num_seconds());
let val = cmp::min(val, Duration::max_value().whole_seconds());
Some(Duration::seconds(val))
}
Err(_) => continue,
@ -179,16 +181,14 @@ fn parse_inner<'c>(s: &str, decode: bool) -> Result<Cookie<'c>, ParseError> {
}
}
("expires", Some(v)) => {
// Try strptime with three date formats according to
// Try parsing with three date formats according to
// http://tools.ietf.org/html/rfc2616#section-3.3.1. Try
// additional ones as encountered in the real world.
let tm = time::strptime(v, "%a, %d %b %Y %H:%M:%S %Z")
.or_else(|_| time::strptime(v, "%A, %d-%b-%y %H:%M:%S %Z"))
.or_else(|_| time::strptime(v, "%a, %d-%b-%Y %H:%M:%S %Z"))
.or_else(|_| time::strptime(v, "%a %b %d %H:%M:%S %Y"));
let tm = time_parser::parse_http_date(v)
.or_else(|| time::parse(v, "%a, %d-%b-%Y %H:%M:%S").ok());
if let Ok(time) = tm {
cookie.expires = Some(time)
if let Some(time) = tm {
cookie.expires = Some(time.assume_utc())
}
}
_ => {
@ -216,8 +216,7 @@ where
#[cfg(test)]
mod tests {
use super::{Cookie, SameSite};
use chrono::Duration;
use time::strptime;
use time::{Duration, PrimitiveDateTime};
macro_rules! assert_eq_parse {
($string:expr, $expected:expr) => {
@ -377,7 +376,9 @@ mod tests {
);
let time_str = "Wed, 21 Oct 2015 07:28:00 GMT";
let expires = strptime(time_str, "%a, %d %b %Y %H:%M:%S %Z").unwrap();
let expires = PrimitiveDateTime::parse(time_str, "%a, %d %b %Y %H:%M:%S")
.unwrap()
.assume_utc();
expected.set_expires(expires);
assert_eq_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
@ -386,7 +387,9 @@ mod tests {
);
unexpected.set_domain("foo.com");
let bad_expires = strptime(time_str, "%a, %d %b %Y %H:%S:%M %Z").unwrap();
let bad_expires = PrimitiveDateTime::parse(time_str, "%a, %d %b %Y %H:%S:%M")
.unwrap()
.assume_utc();
expected.set_expires(bad_expires);
assert_ne_parse!(
" foo=bar ;HttpOnly; Secure; Max-Age=4; Path=/foo; \
@ -414,8 +417,16 @@ mod tests {
#[test]
fn do_not_panic_on_large_max_ages() {
let max_seconds = Duration::max_value().num_seconds();
let expected = Cookie::build("foo", "bar").max_age(max_seconds).finish();
assert_eq_parse!(format!(" foo=bar; Max-Age={:?}", max_seconds + 1), expected);
let max_duration = Duration::max_value();
let expected = Cookie::build("foo", "bar")
.max_age_time(max_duration)
.finish();
let overflow_duration = max_duration
.checked_add(Duration::nanoseconds(1))
.unwrap_or(max_duration);
assert_eq_parse!(
format!(" foo=bar; Max-Age={:?}", overflow_duration.whole_seconds()),
expected
);
}
}
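// Editor's note: a small parsing sketch for the Max-Age path touched above, using the same
// Cookie::parse and whole_seconds calls as the tests; the cookie string is illustrative.
use actix_http::cookie::Cookie;
use time::Duration;

let c = Cookie::parse("id=1; Max-Age=3600; HttpOnly").unwrap();
assert_eq!(c.max_age(), Some(Duration::seconds(3600)));
assert_eq!(c.max_age().map(|age| age.whole_seconds()), Some(3600));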

View File

@ -84,7 +84,7 @@ impl Key {
}
/// Generates signing/encryption keys from a secure, random source. Keys are
/// generated nondeterministically.
/// generated non-deterministically.
///
/// # Panics
///
@ -103,7 +103,7 @@ impl Key {
}
/// Attempts to generate signing/encryption keys from a secure, random
/// source. Keys are generated nondeterministically. If randomness cannot be
/// source. Keys are generated non-deterministically. If randomness cannot be
/// retrieved from the underlying operating system, returns `None`.
///
/// # Example

View File

@ -9,6 +9,7 @@ use brotli2::write::BrotliEncoder;
use bytes::Bytes;
use flate2::write::{GzEncoder, ZlibEncoder};
use futures_core::ready;
use pin_project::{pin_project, project};
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use crate::http::header::{ContentEncoding, CONTENT_ENCODING};
@ -19,8 +20,10 @@ use super::Writer;
const INPLACE: usize = 1024;
#[pin_project]
pub struct Encoder<B> {
eof: bool,
#[pin]
body: EncoderBody<B>,
encoder: Option<ContentEncoder>,
fut: Option<CpuFuture<ContentEncoder, io::Error>>,
@ -76,67 +79,88 @@ impl<B: MessageBody> Encoder<B> {
}
}
#[pin_project]
enum EncoderBody<B> {
Bytes(Bytes),
Stream(B),
BoxedStream(Box<dyn MessageBody>),
Stream(#[pin] B),
BoxedStream(Box<dyn MessageBody + Unpin>),
}
impl<B: MessageBody> MessageBody for EncoderBody<B> {
fn size(&self) -> BodySize {
match self {
EncoderBody::Bytes(ref b) => b.size(),
EncoderBody::Stream(ref b) => b.size(),
EncoderBody::BoxedStream(ref b) => b.size(),
}
}
#[project]
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
#[project]
match self.project() {
EncoderBody::Bytes(b) => {
if b.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(std::mem::replace(b, Bytes::new()))))
}
}
EncoderBody::Stream(b) => b.poll_next(cx),
EncoderBody::BoxedStream(ref mut b) => Pin::new(b.as_mut()).poll_next(cx),
}
}
}
impl<B: MessageBody> MessageBody for Encoder<B> {
fn size(&self) -> BodySize {
if self.encoder.is_none() {
match self.body {
EncoderBody::Bytes(ref b) => b.size(),
EncoderBody::Stream(ref b) => b.size(),
EncoderBody::BoxedStream(ref b) => b.size(),
}
self.body.size()
} else {
BodySize::Stream
}
}
fn poll_next(&mut self, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, Error>>> {
fn poll_next(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<Bytes, Error>>> {
let mut this = self.project();
loop {
if self.eof {
if *this.eof {
return Poll::Ready(None);
}
if let Some(ref mut fut) = self.fut {
if let Some(ref mut fut) = this.fut {
let mut encoder = match ready!(Pin::new(fut).poll(cx)) {
Ok(item) => item,
Err(e) => return Poll::Ready(Some(Err(e.into()))),
};
let chunk = encoder.take();
self.encoder = Some(encoder);
self.fut.take();
*this.encoder = Some(encoder);
this.fut.take();
if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk)));
}
}
let result = match self.body {
EncoderBody::Bytes(ref mut b) => {
if b.is_empty() {
Poll::Ready(None)
} else {
Poll::Ready(Some(Ok(std::mem::replace(b, Bytes::new()))))
}
}
EncoderBody::Stream(ref mut b) => b.poll_next(cx),
EncoderBody::BoxedStream(ref mut b) => b.poll_next(cx),
};
let result = this.body.as_mut().poll_next(cx);
match result {
Poll::Ready(Some(Ok(chunk))) => {
if let Some(mut encoder) = self.encoder.take() {
if let Some(mut encoder) = this.encoder.take() {
if chunk.len() < INPLACE {
encoder.write(&chunk)?;
let chunk = encoder.take();
self.encoder = Some(encoder);
*this.encoder = Some(encoder);
if !chunk.is_empty() {
return Poll::Ready(Some(Ok(chunk)));
}
} else {
self.fut = Some(run(move || {
*this.fut = Some(run(move || {
encoder.write(&chunk)?;
Ok(encoder)
}));
@ -146,12 +170,12 @@ impl<B: MessageBody> MessageBody for Encoder<B> {
}
}
Poll::Ready(None) => {
if let Some(encoder) = self.encoder.take() {
if let Some(encoder) = this.encoder.take() {
let chunk = encoder.finish()?;
if chunk.is_empty() {
return Poll::Ready(None);
} else {
self.eof = true;
*this.eof = true;
return Poll::Ready(Some(Ok(chunk)));
}
} else {
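// Editor's note: the Encoder above now pin-projects its body instead of relying on Unpin and
// unsafe Pin::new_unchecked; a standalone sketch of the same pattern with illustrative types
// (Wrapper/eof/inner are not part of actix-http).
use std::pin::Pin;
use std::task::{Context, Poll};

use futures_core::Stream;
use pin_project::pin_project;

#[pin_project]
struct Wrapper<S> {
    eof: bool,
    #[pin]
    inner: S,
}

impl<S: Stream> Stream for Wrapper<S> {
    type Item = S::Item;

    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<S::Item>> {
        // project() splits the pinned struct into per-field views:
        // `eof` as &mut bool, `inner` as Pin<&mut S>.
        let this = self.project();
        if *this.eof {
            return Poll::Ready(None);
        }
        let res = this.inner.poll_next(cx);
        if let Poll::Ready(None) = res {
            *this.eof = true;
        }
        res
    }
}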

View File

@ -1,5 +1,4 @@
//! Error and Result module
use std::any::TypeId;
use std::cell::RefCell;
use std::io::Write;
use std::str::Utf8Error;
@ -15,12 +14,11 @@ use derive_more::{Display, From};
pub use futures_channel::oneshot::Canceled;
use http::uri::InvalidUri;
use http::{header, Error as HttpError, StatusCode};
use httparse;
use serde::de::value::Error as DeError;
use serde_json::error::Error as JsonError;
use serde_urlencoded::ser::Error as FormError;
// re-export for convinience
// re-export for convenience
use crate::body::Body;
pub use crate::cookie::ParseError as CookieParseError;
use crate::helpers::Writer;
@ -36,7 +34,7 @@ pub type Result<T, E = Error> = result::Result<T, E>;
/// General purpose actix web error.
///
/// An actix web error is used to carry errors from `failure` or `std::error`
/// An actix web error is used to carry errors from `std::error`
/// through actix in a convenient way. It can be created through
/// converting errors with `into()`.
///
@ -83,25 +81,10 @@ pub trait ResponseError: fmt::Debug + fmt::Display {
resp.set_body(Body::from(buf))
}
#[doc(hidden)]
fn __private_get_type_id__(&self) -> TypeId
where
Self: 'static,
{
TypeId::of::<Self>()
}
downcast_get_type_id!();
}
impl dyn ResponseError + 'static {
/// Downcasts a response error to a specific type.
pub fn downcast_ref<T: ResponseError + 'static>(&self) -> Option<&T> {
if self.__private_get_type_id__() == TypeId::of::<T>() {
unsafe { Some(&*(self as *const dyn ResponseError as *const T)) }
} else {
None
}
}
}
downcast!(ResponseError);
impl fmt::Display for Error {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
@ -350,6 +333,8 @@ pub enum PayloadError {
Io(io::Error),
}
impl std::error::Error for PayloadError {}
impl From<h2::Error> for PayloadError {
fn from(err: h2::Error) -> Self {
PayloadError::Http2Payload(err)
@ -447,7 +432,7 @@ pub enum DispatchError {
Unknown,
}
/// A set of error that can occure during parsing content type
/// A set of errors that can occur while parsing content type
#[derive(PartialEq, Debug, Display)]
pub enum ContentTypeError {
/// Can not parse content type
@ -458,6 +443,8 @@ pub enum ContentTypeError {
UnknownEncoding,
}
impl std::error::Error for ContentTypeError {}
/// Return `BadRequest` for `ContentTypeError`
impl ResponseError for ContentTypeError {
fn status_code(&self) -> StatusCode {
@ -963,9 +950,15 @@ where
InternalError::new(err, StatusCode::NETWORK_AUTHENTICATION_REQUIRED).into()
}
#[cfg(feature = "failure")]
/// Compatibility for `failure::Error`
impl ResponseError for fail_ure::Error {}
#[cfg(feature = "actors")]
/// `InternalServerError` for `actix::MailboxError`
/// This is supported on feature=`actors` only
impl ResponseError for actix::MailboxError {}
#[cfg(feature = "actors")]
/// `InternalServerError` for `actix::ResolverError`
/// This is supported on feature=`actors` only
impl ResponseError for actix::actors::resolver::ResolverError {}
#[cfg(test)]
mod tests {
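// Editor's note: the downcast!/downcast_get_type_id! macros above replace the hand-rolled
// __private_get_type_id__ plumbing; assuming they generate the same downcast_ref shown in the
// removed impl, usage looks roughly like this (the helper function is illustrative):
use actix_http::error::{ContentTypeError, ResponseError};

fn is_content_type_error(err: &(dyn ResponseError + 'static)) -> bool {
    err.downcast_ref::<ContentTypeError>().is_some()
}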

View File

@ -6,6 +6,8 @@ use fxhash::FxHashMap;
#[derive(Default)]
/// A type map of request extensions.
pub struct Extensions {
/// Use FxHasher with a std HashMap for faster
/// lookups on the small `TypeId` (u64 equivalent) keys.
map: FxHashMap<TypeId, Box<dyn Any>>,
}
@ -28,33 +30,30 @@ impl Extensions {
/// Check if container contains entry
pub fn contains<T: 'static>(&self) -> bool {
self.map.get(&TypeId::of::<T>()).is_some()
self.map.contains_key(&TypeId::of::<T>())
}
/// Get a reference to a type previously inserted on this `Extensions`.
pub fn get<T: 'static>(&self) -> Option<&T> {
self.map
.get(&TypeId::of::<T>())
.and_then(|boxed| (&**boxed as &(dyn Any + 'static)).downcast_ref())
.and_then(|boxed| boxed.downcast_ref())
}
/// Get a mutable reference to a type previously inserted on this `Extensions`.
pub fn get_mut<T: 'static>(&mut self) -> Option<&mut T> {
self.map
.get_mut(&TypeId::of::<T>())
.and_then(|boxed| (&mut **boxed as &mut (dyn Any + 'static)).downcast_mut())
.and_then(|boxed| boxed.downcast_mut())
}
/// Remove a type from this `Extensions`.
///
/// If a extension of this type existed, it will be returned.
pub fn remove<T: 'static>(&mut self) -> Option<T> {
self.map.remove(&TypeId::of::<T>()).and_then(|boxed| {
(boxed as Box<dyn Any + 'static>)
.downcast()
.ok()
.map(|boxed| *boxed)
})
self.map
.remove(&TypeId::of::<T>())
.and_then(|boxed| boxed.downcast().ok().map(|boxed| *boxed))
}
/// Clear the `Extensions` of all inserted extensions.
@ -70,22 +69,113 @@ impl fmt::Debug for Extensions {
}
}
#[test]
fn test_extensions() {
#[derive(Debug, PartialEq)]
struct MyType(i32);
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_remove() {
let mut map = Extensions::new();
let mut extensions = Extensions::new();
map.insert::<i8>(123);
assert!(map.get::<i8>().is_some());
extensions.insert(5i32);
extensions.insert(MyType(10));
map.remove::<i8>();
assert!(map.get::<i8>().is_none());
}
assert_eq!(extensions.get(), Some(&5i32));
assert_eq!(extensions.get_mut(), Some(&mut 5i32));
#[test]
fn test_clear() {
let mut map = Extensions::new();
assert_eq!(extensions.remove::<i32>(), Some(5i32));
assert!(extensions.get::<i32>().is_none());
map.insert::<i8>(8);
map.insert::<i16>(16);
map.insert::<i32>(32);
assert_eq!(extensions.get::<bool>(), None);
assert_eq!(extensions.get(), Some(&MyType(10)));
assert!(map.contains::<i8>());
assert!(map.contains::<i16>());
assert!(map.contains::<i32>());
map.clear();
assert!(!map.contains::<i8>());
assert!(!map.contains::<i16>());
assert!(!map.contains::<i32>());
map.insert::<i8>(10);
assert_eq!(*map.get::<i8>().unwrap(), 10);
}
#[test]
fn test_integers() {
let mut map = Extensions::new();
map.insert::<i8>(8);
map.insert::<i16>(16);
map.insert::<i32>(32);
map.insert::<i64>(64);
map.insert::<i128>(128);
map.insert::<u8>(8);
map.insert::<u16>(16);
map.insert::<u32>(32);
map.insert::<u64>(64);
map.insert::<u128>(128);
assert!(map.get::<i8>().is_some());
assert!(map.get::<i16>().is_some());
assert!(map.get::<i32>().is_some());
assert!(map.get::<i64>().is_some());
assert!(map.get::<i128>().is_some());
assert!(map.get::<u8>().is_some());
assert!(map.get::<u16>().is_some());
assert!(map.get::<u32>().is_some());
assert!(map.get::<u64>().is_some());
assert!(map.get::<u128>().is_some());
}
#[test]
fn test_composition() {
struct Magi<T>(pub T);
struct Madoka {
pub god: bool,
}
struct Homura {
pub attempts: usize,
}
struct Mami {
pub guns: usize,
}
let mut map = Extensions::new();
map.insert(Magi(Madoka { god: false }));
map.insert(Magi(Homura { attempts: 0 }));
map.insert(Magi(Mami { guns: 999 }));
assert!(!map.get::<Magi<Madoka>>().unwrap().0.god);
assert_eq!(0, map.get::<Magi<Homura>>().unwrap().0.attempts);
assert_eq!(999, map.get::<Magi<Mami>>().unwrap().0.guns);
}
#[test]
fn test_extensions() {
#[derive(Debug, PartialEq)]
struct MyType(i32);
let mut extensions = Extensions::new();
extensions.insert(5i32);
extensions.insert(MyType(10));
assert_eq!(extensions.get(), Some(&5i32));
assert_eq!(extensions.get_mut(), Some(&mut 5i32));
assert_eq!(extensions.remove::<i32>(), Some(5i32));
assert!(extensions.get::<i32>().is_none());
assert_eq!(extensions.get::<bool>(), None);
assert_eq!(extensions.get(), Some(&MyType(10)));
}
}
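// Editor's note: the simplified downcasts above work because Box<dyn Any> deref-coerces to
// dyn Any; a standalone sketch of that mechanism (the values are illustrative):
use std::any::Any;

fn main() {
    let boxed: Box<dyn Any> = Box::new(42u32);
    // downcast_ref/downcast_mut resolve on the trait object behind the Box.
    assert_eq!(boxed.downcast_ref::<u32>(), Some(&42u32));
    // The consuming downcast returns the boxed concrete value on success,
    // mirroring Extensions::remove above.
    let val = boxed.downcast::<u32>().ok().map(|b| *b);
    assert_eq!(val, Some(42u32));
}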

View File

@ -8,7 +8,6 @@ use actix_codec::Decoder;
use bytes::{Buf, Bytes, BytesMut};
use http::header::{HeaderName, HeaderValue};
use http::{header, Method, StatusCode, Uri, Version};
use httparse;
use log::{debug, error, trace};
use crate::error::ParseError;
@ -19,7 +18,7 @@ use crate::request::Request;
const MAX_BUFFER_SIZE: usize = 131_072;
const MAX_HEADERS: usize = 96;
/// Incoming messagd decoder
/// Incoming message decoder
pub(crate) struct MessageDecoder<T: MessageType>(PhantomData<T>);
#[derive(Debug)]

View File

@ -1,3 +1,6 @@
// Because MSRV is 1.39.0.
#![allow(clippy::mem_replace_with_default)]
use std::collections::VecDeque;
use std::future::Future;
use std::pin::Pin;
@ -10,6 +13,7 @@ use actix_service::Service;
use bitflags::bitflags;
use bytes::{Buf, BytesMut};
use log::{error, trace};
use pin_project::pin_project;
use crate::body::{Body, BodySize, MessageBody, ResponseBody};
use crate::cloneable::CloneableService;
@ -41,6 +45,7 @@ bitflags! {
}
}
#[pin_project::pin_project]
/// Dispatcher for HTTP/1.1 protocol
pub struct Dispatcher<T, S, B, X, U>
where
@ -52,9 +57,11 @@ where
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
#[pin]
inner: DispatcherState<T, S, B, X, U>,
}
#[pin_project]
enum DispatcherState<T, S, B, X, U>
where
S: Service<Request = Request>,
@ -65,11 +72,11 @@ where
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
Normal(InnerDispatcher<T, S, B, X, U>),
Upgrade(U::Future),
None,
Normal(#[pin] InnerDispatcher<T, S, B, X, U>),
Upgrade(Pin<Box<U::Future>>),
}
#[pin_project]
struct InnerDispatcher<T, S, B, X, U>
where
S: Service<Request = Request>,
@ -88,6 +95,7 @@ where
peer_addr: Option<net::SocketAddr>,
error: Option<DispatchError>,
#[pin]
state: State<S, B, X>,
payload: Option<PayloadSender>,
messages: VecDeque<DispatcherMessage>,
@ -95,7 +103,7 @@ where
ka_expire: Instant,
ka_timer: Option<Delay>,
io: T,
io: Option<T>,
read_buf: BytesMut,
write_buf: BytesMut,
codec: Codec,
@ -107,6 +115,7 @@ enum DispatcherMessage {
Error(Response<()>),
}
#[pin_project]
enum State<S, B, X>
where
S: Service<Request = Request>,
@ -114,9 +123,9 @@ where
B: MessageBody,
{
None,
ExpectCall(X::Future),
ServiceCall(S::Future),
SendPayload(ResponseBody<B>),
ExpectCall(Pin<Box<X::Future>>),
ServiceCall(Pin<Box<S::Future>>),
SendPayload(#[pin] ResponseBody<B>),
}
impl<S, B, X> State<S, B, X>
@ -141,7 +150,6 @@ where
}
}
}
enum PollResponse {
Upgrade(Request),
DoNothing,
@ -236,7 +244,7 @@ where
state: State::None,
error: None,
messages: VecDeque::new(),
io,
io: Some(io),
codec,
read_buf,
service,
@ -278,29 +286,35 @@ where
}
// if checked is set to true, delay disconnect until all tasks have finished.
fn client_disconnected(&mut self) {
self.flags
fn client_disconnected(self: Pin<&mut Self>) {
let this = self.project();
this.flags
.insert(Flags::READ_DISCONNECT | Flags::WRITE_DISCONNECT);
if let Some(mut payload) = self.payload.take() {
if let Some(mut payload) = this.payload.take() {
payload.set_error(PayloadError::Incomplete(None));
}
}
/// Flush stream
///
/// true - got whouldblock
/// false - didnt get whouldblock
fn poll_flush(&mut self, cx: &mut Context<'_>) -> Result<bool, DispatchError> {
/// true - got WouldBlock
/// false - didn't get WouldBlock
#[pin_project::project]
fn poll_flush(
self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Result<bool, DispatchError> {
if self.write_buf.is_empty() {
return Ok(false);
}
let len = self.write_buf.len();
let mut written = 0;
#[project]
let InnerDispatcher { io, write_buf, .. } = self.project();
let mut io = Pin::new(io.as_mut().unwrap());
while written < len {
match unsafe { Pin::new_unchecked(&mut self.io) }
.poll_write(cx, &self.write_buf[written..])
{
match io.as_mut().poll_write(cx, &write_buf[written..]) {
Poll::Ready(Ok(0)) => {
return Err(DispatchError::Io(io::Error::new(
io::ErrorKind::WriteZero,
@ -312,112 +326,119 @@ where
}
Poll::Pending => {
if written > 0 {
self.write_buf.advance(written);
write_buf.advance(written);
}
return Ok(true);
}
Poll::Ready(Err(err)) => return Err(DispatchError::Io(err)),
}
}
if written == self.write_buf.len() {
unsafe { self.write_buf.set_len(0) }
if written == write_buf.len() {
unsafe { write_buf.set_len(0) }
} else {
self.write_buf.advance(written);
write_buf.advance(written);
}
Ok(false)
}
fn send_response(
&mut self,
self: Pin<&mut Self>,
message: Response<()>,
body: ResponseBody<B>,
) -> Result<State<S, B, X>, DispatchError> {
self.codec
.encode(Message::Item((message, body.size())), &mut self.write_buf)
let mut this = self.project();
this.codec
.encode(Message::Item((message, body.size())), &mut this.write_buf)
.map_err(|err| {
if let Some(mut payload) = self.payload.take() {
if let Some(mut payload) = this.payload.take() {
payload.set_error(PayloadError::Incomplete(None));
}
DispatchError::Io(err)
})?;
self.flags.set(Flags::KEEPALIVE, self.codec.keepalive());
this.flags.set(Flags::KEEPALIVE, this.codec.keepalive());
match body.size() {
BodySize::None | BodySize::Empty => Ok(State::None),
_ => Ok(State::SendPayload(body)),
}
}
fn send_continue(&mut self) {
self.write_buf
fn send_continue(self: Pin<&mut Self>) {
self.project()
.write_buf
.extend_from_slice(b"HTTP/1.1 100 Continue\r\n\r\n");
}
#[pin_project::project]
fn poll_response(
&mut self,
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Result<PollResponse, DispatchError> {
loop {
let state = match self.state {
State::None => match self.messages.pop_front() {
let mut this = self.as_mut().project();
#[project]
let state = match this.state.project() {
State::None => match this.messages.pop_front() {
Some(DispatcherMessage::Item(req)) => {
Some(self.handle_request(req, cx)?)
}
Some(DispatcherMessage::Error(res)) => {
Some(self.send_response(res, ResponseBody::Other(Body::Empty))?)
Some(self.as_mut().handle_request(req, cx)?)
}
Some(DispatcherMessage::Error(res)) => Some(
self.as_mut()
.send_response(res, ResponseBody::Other(Body::Empty))?,
),
Some(DispatcherMessage::Upgrade(req)) => {
return Ok(PollResponse::Upgrade(req));
}
None => None,
},
State::ExpectCall(ref mut fut) => {
match unsafe { Pin::new_unchecked(fut) }.poll(cx) {
Poll::Ready(Ok(req)) => {
self.send_continue();
self.state = State::ServiceCall(self.service.call(req));
continue;
}
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
Some(self.send_response(res, body.into_body())?)
}
Poll::Pending => None,
State::ExpectCall(fut) => match fut.as_mut().poll(cx) {
Poll::Ready(Ok(req)) => {
self.as_mut().send_continue();
this = self.as_mut().project();
this.state
.set(State::ServiceCall(Box::pin(this.service.call(req))));
continue;
}
}
State::ServiceCall(ref mut fut) => {
match unsafe { Pin::new_unchecked(fut) }.poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
self.state = self.send_response(res, body)?;
continue;
}
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
Some(self.send_response(res, body.into_body())?)
}
Poll::Pending => None,
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
Some(self.as_mut().send_response(res, body.into_body())?)
}
}
State::SendPayload(ref mut stream) => {
Poll::Pending => None,
},
State::ServiceCall(fut) => match fut.as_mut().poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
let state = self.as_mut().send_response(res, body)?;
this = self.as_mut().project();
this.state.set(state);
continue;
}
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
Some(self.as_mut().send_response(res, body.into_body())?)
}
Poll::Pending => None,
},
State::SendPayload(mut stream) => {
loop {
if self.write_buf.len() < HW_BUFFER_SIZE {
match stream.poll_next(cx) {
if this.write_buf.len() < HW_BUFFER_SIZE {
match stream.as_mut().poll_next(cx) {
Poll::Ready(Some(Ok(item))) => {
self.codec.encode(
this.codec.encode(
Message::Chunk(Some(item)),
&mut self.write_buf,
&mut this.write_buf,
)?;
continue;
}
Poll::Ready(None) => {
self.codec.encode(
this.codec.encode(
Message::Chunk(None),
&mut self.write_buf,
&mut this.write_buf,
)?;
self.state = State::None;
this = self.as_mut().project();
this.state.set(State::None);
}
Poll::Ready(Some(Err(_))) => {
return Err(DispatchError::Unknown)
@ -433,9 +454,11 @@ where
}
};
this = self.as_mut().project();
// set new state
if let Some(state) = state {
self.state = state;
this.state.set(state);
if !self.state.is_empty() {
continue;
}
@ -443,7 +466,7 @@ where
// if read-backpressure is enabled and we consumed some data.
// we may read more data and retry
if self.state.is_call() {
if self.poll_request(cx)? {
if self.as_mut().poll_request(cx)? {
continue;
}
} else if !self.messages.is_empty() {
@ -457,16 +480,16 @@ where
}
fn handle_request(
&mut self,
mut self: Pin<&mut Self>,
req: Request,
cx: &mut Context<'_>,
) -> Result<State<S, B, X>, DispatchError> {
// Handle `EXPECT: 100-Continue` header
let req = if req.head().expect() {
let mut task = self.expect.call(req);
match unsafe { Pin::new_unchecked(&mut task) }.poll(cx) {
let mut task = Box::pin(self.as_mut().project().expect.call(req));
match task.as_mut().poll(cx) {
Poll::Ready(Ok(req)) => {
self.send_continue();
self.as_mut().send_continue();
req
}
Poll::Pending => return Ok(State::ExpectCall(task)),
@ -482,8 +505,8 @@ where
};
// Call service
let mut task = self.service.call(req);
match unsafe { Pin::new_unchecked(&mut task) }.poll(cx) {
let mut task = Box::pin(self.as_mut().project().service.call(req));
match task.as_mut().poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
self.send_response(res, body)
@ -499,7 +522,7 @@ where
/// Process one incoming request
pub(self) fn poll_request(
&mut self,
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Result<bool, DispatchError> {
// limit the amount of non-processed requests
@ -508,24 +531,25 @@ where
}
let mut updated = false;
let mut this = self.as_mut().project();
loop {
match self.codec.decode(&mut self.read_buf) {
match this.codec.decode(&mut this.read_buf) {
Ok(Some(msg)) => {
updated = true;
self.flags.insert(Flags::STARTED);
this.flags.insert(Flags::STARTED);
match msg {
Message::Item(mut req) => {
let pl = self.codec.message_type();
req.head_mut().peer_addr = self.peer_addr;
let pl = this.codec.message_type();
req.head_mut().peer_addr = *this.peer_addr;
// set on_connect data
if let Some(ref on_connect) = self.on_connect {
if let Some(ref on_connect) = this.on_connect {
on_connect.set(&mut req.extensions_mut());
}
if pl == MessageType::Stream && self.upgrade.is_some() {
self.messages.push_back(DispatcherMessage::Upgrade(req));
if pl == MessageType::Stream && this.upgrade.is_some() {
this.messages.push_back(DispatcherMessage::Upgrade(req));
break;
}
if pl == MessageType::Payload || pl == MessageType::Stream {
@ -533,41 +557,43 @@ where
let (req1, _) =
req.replace_payload(crate::Payload::H1(pl));
req = req1;
self.payload = Some(ps);
*this.payload = Some(ps);
}
// handle request early
if self.state.is_empty() {
self.state = self.handle_request(req, cx)?;
if this.state.is_empty() {
let state = self.as_mut().handle_request(req, cx)?;
this = self.as_mut().project();
this.state.set(state);
} else {
self.messages.push_back(DispatcherMessage::Item(req));
this.messages.push_back(DispatcherMessage::Item(req));
}
}
Message::Chunk(Some(chunk)) => {
if let Some(ref mut payload) = self.payload {
if let Some(ref mut payload) = this.payload {
payload.feed_data(chunk);
} else {
error!(
"Internal server error: unexpected payload chunk"
);
self.flags.insert(Flags::READ_DISCONNECT);
self.messages.push_back(DispatcherMessage::Error(
this.flags.insert(Flags::READ_DISCONNECT);
this.messages.push_back(DispatcherMessage::Error(
Response::InternalServerError().finish().drop_body(),
));
self.error = Some(DispatchError::InternalError);
*this.error = Some(DispatchError::InternalError);
break;
}
}
Message::Chunk(None) => {
if let Some(mut payload) = self.payload.take() {
if let Some(mut payload) = this.payload.take() {
payload.feed_eof();
} else {
error!("Internal server error: unexpected eof");
self.flags.insert(Flags::READ_DISCONNECT);
self.messages.push_back(DispatcherMessage::Error(
this.flags.insert(Flags::READ_DISCONNECT);
this.messages.push_back(DispatcherMessage::Error(
Response::InternalServerError().finish().drop_body(),
));
self.error = Some(DispatchError::InternalError);
*this.error = Some(DispatchError::InternalError);
break;
}
}
@ -575,44 +601,49 @@ where
}
Ok(None) => break,
Err(ParseError::Io(e)) => {
self.client_disconnected();
self.error = Some(DispatchError::Io(e));
self.as_mut().client_disconnected();
this = self.as_mut().project();
*this.error = Some(DispatchError::Io(e));
break;
}
Err(e) => {
if let Some(mut payload) = self.payload.take() {
if let Some(mut payload) = this.payload.take() {
payload.set_error(PayloadError::EncodingCorrupted);
}
// Malformed requests should be responded to with 400
self.messages.push_back(DispatcherMessage::Error(
this.messages.push_back(DispatcherMessage::Error(
Response::BadRequest().finish().drop_body(),
));
self.flags.insert(Flags::READ_DISCONNECT);
self.error = Some(e.into());
this.flags.insert(Flags::READ_DISCONNECT);
*this.error = Some(e.into());
break;
}
}
}
if updated && self.ka_timer.is_some() {
if let Some(expire) = self.codec.config().keep_alive_expire() {
self.ka_expire = expire;
if updated && this.ka_timer.is_some() {
if let Some(expire) = this.codec.config().keep_alive_expire() {
*this.ka_expire = expire;
}
}
Ok(updated)
}
/// keep-alive timer
fn poll_keepalive(&mut self, cx: &mut Context<'_>) -> Result<(), DispatchError> {
if self.ka_timer.is_none() {
fn poll_keepalive(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Result<(), DispatchError> {
let mut this = self.as_mut().project();
if this.ka_timer.is_none() {
// shutdown timeout
if self.flags.contains(Flags::SHUTDOWN) {
if let Some(interval) = self.codec.config().client_disconnect_timer() {
self.ka_timer = Some(delay_until(interval));
if this.flags.contains(Flags::SHUTDOWN) {
if let Some(interval) = this.codec.config().client_disconnect_timer() {
*this.ka_timer = Some(delay_until(interval));
} else {
self.flags.insert(Flags::READ_DISCONNECT);
if let Some(mut payload) = self.payload.take() {
this.flags.insert(Flags::READ_DISCONNECT);
if let Some(mut payload) = this.payload.take() {
payload.set_error(PayloadError::Incomplete(None));
}
return Ok(());
@ -622,55 +653,56 @@ where
}
}
match Pin::new(&mut self.ka_timer.as_mut().unwrap()).poll(cx) {
match Pin::new(&mut this.ka_timer.as_mut().unwrap()).poll(cx) {
Poll::Ready(()) => {
// if we get timeout during shutdown, drop connection
if self.flags.contains(Flags::SHUTDOWN) {
if this.flags.contains(Flags::SHUTDOWN) {
return Err(DispatchError::DisconnectTimeout);
} else if self.ka_timer.as_mut().unwrap().deadline() >= self.ka_expire {
} else if this.ka_timer.as_mut().unwrap().deadline() >= *this.ka_expire {
// check for any outstanding tasks
if self.state.is_empty() && self.write_buf.is_empty() {
if self.flags.contains(Flags::STARTED) {
if this.state.is_empty() && this.write_buf.is_empty() {
if this.flags.contains(Flags::STARTED) {
trace!("Keep-alive timeout, close connection");
self.flags.insert(Flags::SHUTDOWN);
this.flags.insert(Flags::SHUTDOWN);
// start shutdown timer
if let Some(deadline) =
self.codec.config().client_disconnect_timer()
this.codec.config().client_disconnect_timer()
{
if let Some(mut timer) = self.ka_timer.as_mut() {
if let Some(mut timer) = this.ka_timer.as_mut() {
timer.reset(deadline);
let _ = Pin::new(&mut timer).poll(cx);
}
} else {
// no shutdown timeout, drop socket
self.flags.insert(Flags::WRITE_DISCONNECT);
this.flags.insert(Flags::WRITE_DISCONNECT);
return Ok(());
}
} else {
// timeout on first request (slow request) return 408
if !self.flags.contains(Flags::STARTED) {
if !this.flags.contains(Flags::STARTED) {
trace!("Slow request timeout");
let _ = self.send_response(
let _ = self.as_mut().send_response(
Response::RequestTimeout().finish().drop_body(),
ResponseBody::Other(Body::Empty),
);
this = self.as_mut().project();
} else {
trace!("Keep-alive connection timeout");
}
self.flags.insert(Flags::STARTED | Flags::SHUTDOWN);
self.state = State::None;
this.flags.insert(Flags::STARTED | Flags::SHUTDOWN);
this.state.set(State::None);
}
} else if let Some(deadline) =
self.codec.config().keep_alive_expire()
this.codec.config().keep_alive_expire()
{
if let Some(mut timer) = self.ka_timer.as_mut() {
if let Some(mut timer) = this.ka_timer.as_mut() {
timer.reset(deadline);
let _ = Pin::new(&mut timer).poll(cx);
}
}
} else if let Some(mut timer) = self.ka_timer.as_mut() {
timer.reset(self.ka_expire);
} else if let Some(mut timer) = this.ka_timer.as_mut() {
timer.reset(*this.ka_expire);
let _ = Pin::new(&mut timer).poll(cx);
}
}
@ -681,20 +713,6 @@ where
}
}
impl<T, S, B, X, U> Unpin for Dispatcher<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
S: Service<Request = Request>,
S::Error: Into<Error>,
S::Response: Into<Response<B>>,
B: MessageBody,
X: Service<Request = Request, Response = Request>,
X::Error: Into<Error>,
U: Service<Request = (Request, Framed<T, Codec>), Response = ()>,
U::Error: fmt::Display,
{
}
impl<T, S, B, X, U> Future for Dispatcher<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin,
@ -709,22 +727,29 @@ where
{
type Output = Result<(), DispatchError>;
#[pin_project::project]
#[inline]
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
match self.as_mut().inner {
DispatcherState::Normal(ref mut inner) => {
inner.poll_keepalive(cx)?;
let this = self.as_mut().project();
#[project]
match this.inner.project() {
DispatcherState::Normal(mut inner) => {
inner.as_mut().poll_keepalive(cx)?;
if inner.flags.contains(Flags::SHUTDOWN) {
if inner.flags.contains(Flags::WRITE_DISCONNECT) {
Poll::Ready(Ok(()))
} else {
// flush buffer
inner.poll_flush(cx)?;
if !inner.write_buf.is_empty() {
inner.as_mut().poll_flush(cx)?;
if !inner.write_buf.is_empty() || inner.io.is_none() {
Poll::Pending
} else {
match Pin::new(&mut inner.io).poll_shutdown(cx) {
match Pin::new(inner.project().io)
.as_pin_mut()
.unwrap()
.poll_shutdown(cx)
{
Poll::Ready(res) => {
Poll::Ready(res.map_err(DispatchError::from))
}
@ -736,53 +761,61 @@ where
// read socket into a buf
let should_disconnect =
if !inner.flags.contains(Flags::READ_DISCONNECT) {
read_available(cx, &mut inner.io, &mut inner.read_buf)?
let mut inner_p = inner.as_mut().project();
read_available(
cx,
inner_p.io.as_mut().unwrap(),
&mut inner_p.read_buf,
)?
} else {
None
};
inner.poll_request(cx)?;
inner.as_mut().poll_request(cx)?;
if let Some(true) = should_disconnect {
inner.flags.insert(Flags::READ_DISCONNECT);
if let Some(mut payload) = inner.payload.take() {
let inner_p = inner.as_mut().project();
inner_p.flags.insert(Flags::READ_DISCONNECT);
if let Some(mut payload) = inner_p.payload.take() {
payload.feed_eof();
}
};
loop {
let inner_p = inner.as_mut().project();
let remaining =
inner.write_buf.capacity() - inner.write_buf.len();
inner_p.write_buf.capacity() - inner_p.write_buf.len();
if remaining < LW_BUFFER_SIZE {
inner.write_buf.reserve(HW_BUFFER_SIZE - remaining);
inner_p.write_buf.reserve(HW_BUFFER_SIZE - remaining);
}
let result = inner.poll_response(cx)?;
let result = inner.as_mut().poll_response(cx)?;
let drain = result == PollResponse::DrainWriteBuf;
// switch to upgrade handler
if let PollResponse::Upgrade(req) = result {
if let DispatcherState::Normal(inner) =
std::mem::replace(&mut self.inner, DispatcherState::None)
{
let mut parts = FramedParts::with_read_buf(
inner.io,
inner.codec,
inner.read_buf,
);
parts.write_buf = inner.write_buf;
let framed = Framed::from_parts(parts);
self.inner = DispatcherState::Upgrade(
inner.upgrade.unwrap().call((req, framed)),
);
return self.poll(cx);
} else {
panic!()
}
let inner_p = inner.as_mut().project();
let mut parts = FramedParts::with_read_buf(
inner_p.io.take().unwrap(),
std::mem::replace(inner_p.codec, Codec::default()),
std::mem::replace(inner_p.read_buf, BytesMut::default()),
);
parts.write_buf = std::mem::replace(
inner_p.write_buf,
BytesMut::default(),
);
let framed = Framed::from_parts(parts);
let upgrade =
inner_p.upgrade.take().unwrap().call((req, framed));
self.as_mut()
.project()
.inner
.set(DispatcherState::Upgrade(Box::pin(upgrade)));
return self.poll(cx);
}
// we didnt get WouldBlock from write operation,
// we didn't get WouldBlock from write operation,
// so data gets written to the kernel completely (OSX)
// and we have to write again, otherwise the response can get stuck
if inner.poll_flush(cx)? || !drain {
if inner.as_mut().poll_flush(cx)? || !drain {
break;
}
}
@ -794,25 +827,26 @@ where
let is_empty = inner.state.is_empty();
let inner_p = inner.as_mut().project();
// read half is closed and we are not processing any responses
if inner.flags.contains(Flags::READ_DISCONNECT) && is_empty {
inner.flags.insert(Flags::SHUTDOWN);
if inner_p.flags.contains(Flags::READ_DISCONNECT) && is_empty {
inner_p.flags.insert(Flags::SHUTDOWN);
}
// keep-alive and stream errors
if is_empty && inner.write_buf.is_empty() {
if let Some(err) = inner.error.take() {
if is_empty && inner_p.write_buf.is_empty() {
if let Some(err) = inner_p.error.take() {
Poll::Ready(Err(err))
}
// disconnect if keep-alive is not enabled
else if inner.flags.contains(Flags::STARTED)
&& !inner.flags.intersects(Flags::KEEPALIVE)
else if inner_p.flags.contains(Flags::STARTED)
&& !inner_p.flags.intersects(Flags::KEEPALIVE)
{
inner.flags.insert(Flags::SHUTDOWN);
inner_p.flags.insert(Flags::SHUTDOWN);
self.poll(cx)
}
// disconnect if shutdown
else if inner.flags.contains(Flags::SHUTDOWN) {
else if inner_p.flags.contains(Flags::SHUTDOWN) {
self.poll(cx)
} else {
Poll::Pending
@ -822,13 +856,10 @@ where
}
}
}
DispatcherState::Upgrade(ref mut fut) => {
unsafe { Pin::new_unchecked(fut) }.poll(cx).map_err(|e| {
error!("Upgrade handler error: {}", e);
DispatchError::Upgrade
})
}
DispatcherState::None => panic!(),
DispatcherState::Upgrade(fut) => fut.as_mut().poll(cx).map_err(|e| {
error!("Upgrade handler error: {}", e);
DispatchError::Upgrade
}),
}
}
}
@ -918,9 +949,12 @@ mod tests {
Poll::Ready(res) => assert!(res.is_err()),
}
if let DispatcherState::Normal(ref inner) = h1.inner {
if let DispatcherState::Normal(ref mut inner) = h1.inner {
assert!(inner.flags.contains(Flags::READ_DISCONNECT));
assert_eq!(&inner.io.write_buf[..26], b"HTTP/1.1 400 Bad Request\r\n");
assert_eq!(
&inner.io.take().unwrap().write_buf[..26],
b"HTTP/1.1 400 Bad Request\r\n"
);
}
})
.await;
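// Editor's note: the dispatcher now stores its expect/service/upgrade futures as Pin<Box<_>>
// and polls them through as_mut() instead of unsafe Pin::new_unchecked; a minimal sketch of
// that pattern (the function name is illustrative):
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};

fn poll_boxed<F: Future>(fut: &mut Pin<Box<F>>, cx: &mut Context<'_>) -> Poll<F::Output> {
    // as_mut() reborrows the pinned box as Pin<&mut F>, which can be polled safely.
    fut.as_mut().poll(cx)
}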

View File

@ -364,7 +364,7 @@ where
}
/// `Service` implementation for HTTP1 transport
pub struct H1ServiceHandler<T, S, B, X, U> {
pub struct H1ServiceHandler<T, S: Service, B, X: Service, U: Service> {
srv: CloneableService<S>,
expect: CloneableService<X>,
upgrade: Option<CloneableService<U>>,

View File

@ -13,6 +13,7 @@ use crate::response::Response;
#[pin_project::pin_project]
pub struct SendResponse<T, B> {
res: Option<Message<(Response<()>, BodySize)>>,
#[pin]
body: Option<ResponseBody<B>>,
framed: Option<Framed<T, Codec>>,
}
@ -35,24 +36,27 @@ where
impl<T, B> Future for SendResponse<T, B>
where
T: AsyncRead + AsyncWrite,
B: MessageBody,
B: MessageBody + Unpin,
{
type Output = Result<Framed<T, Codec>, Error>;
// TODO: rethink if we need loops in polls
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
let mut this = self.project();
let mut body_done = this.body.is_none();
loop {
let mut body_ready = this.body.is_some();
let mut body_ready = !body_done;
let framed = this.framed.as_mut().unwrap();
// send body
if this.res.is_none() && this.body.is_some() {
while body_ready && this.body.is_some() && !framed.is_write_buf_full() {
match this.body.as_mut().unwrap().poll_next(cx)? {
if this.res.is_none() && body_ready {
while body_ready && !body_done && !framed.is_write_buf_full() {
match this.body.as_mut().as_pin_mut().unwrap().poll_next(cx)? {
Poll::Ready(item) => {
// body is done
if item.is_none() {
// body is done when item is None
body_done = item.is_none();
if body_done {
let _ = this.body.take();
}
framed.write(Message::Chunk(item))?;
@ -82,7 +86,7 @@ where
continue;
}
if this.body.is_some() {
if !body_done {
if body_ready {
continue;
} else {

View File

@ -158,15 +158,17 @@ where
#[pin_project::pin_project]
struct ServiceResponse<F, I, E, B> {
#[pin]
state: ServiceResponseState<F, B>,
config: ServiceConfig,
buffer: Option<Bytes>,
_t: PhantomData<(I, E)>,
}
#[pin_project::pin_project]
enum ServiceResponseState<F, B> {
ServiceCall(F, Option<SendResponse<Bytes>>),
SendPayload(SendStream<Bytes>, ResponseBody<B>),
ServiceCall(#[pin] F, Option<SendResponse<Bytes>>),
SendPayload(SendStream<Bytes>, #[pin] ResponseBody<B>),
}
impl<F, I, E, B> ServiceResponse<F, I, E, B>
@ -247,68 +249,66 @@ where
{
type Output = ();
#[pin_project::project]
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
match this.state {
ServiceResponseState::ServiceCall(ref mut call, ref mut send) => {
match unsafe { Pin::new_unchecked(call) }.poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
#[project]
match this.state.project() {
ServiceResponseState::ServiceCall(call, send) => match call.poll(cx) {
Poll::Ready(Ok(res)) => {
let (res, body) = res.into().replace_body(());
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res =
self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res = self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
*this.state =
ServiceResponseState::SendPayload(stream, body);
self.poll(cx)
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
}
Poll::Pending => Poll::Pending,
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
Ok(stream) => stream,
};
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res =
self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
*this.state = ServiceResponseState::SendPayload(
stream,
body.into_body(),
);
self.poll(cx)
}
if size.is_eof() {
Poll::Ready(())
} else {
this.state
.set(ServiceResponseState::SendPayload(stream, body));
self.poll(cx)
}
}
}
Poll::Pending => Poll::Pending,
Poll::Ready(Err(e)) => {
let res: Response = e.into().into();
let (res, body) = res.replace_body(());
let mut send = send.take().unwrap();
let mut size = body.size();
let h2_res = self.as_mut().prepare_response(res.head(), &mut size);
this = self.as_mut().project();
let stream = match send.send_response(h2_res, size.is_eof()) {
Err(e) => {
trace!("Error sending h2 response: {:?}", e);
return Poll::Ready(());
}
Ok(stream) => stream,
};
if size.is_eof() {
Poll::Ready(())
} else {
this.state.set(ServiceResponseState::SendPayload(
stream,
body.into_body(),
));
self.poll(cx)
}
}
},
ServiceResponseState::SendPayload(ref mut stream, ref mut body) => loop {
loop {
if let Some(ref mut buffer) = this.buffer {
@ -335,7 +335,7 @@ where
}
}
} else {
match body.poll_next(cx) {
match body.as_mut().poll_next(cx) {
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => {
if let Err(e) = stream.send_data(Bytes::new(), true) {
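// Editor's note: `this.state.set(...)` above relies on pin-project exposing the #[pin] field
// as Pin<&mut _>, whose value Pin::set replaces in place; a standalone sketch with
// illustrative types (Task/State are not part of actix-http):
use std::pin::Pin;

use pin_project::pin_project;

enum State {
    Idle,
    Sending,
}

#[pin_project]
struct Task {
    #[pin]
    state: State,
}

fn start_sending(task: Pin<&mut Task>) {
    let mut this = task.project();
    // this.state is Pin<&mut State>; set() drops the old value and writes the new one in place.
    this.state.set(State::Sending);
}

fn main() {
    let mut task = Box::pin(Task { state: State::Idle });
    start_sending(task.as_mut());
}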

View File

@ -83,13 +83,11 @@ where
Error = DispatchError,
InitError = S::InitError,
> {
pipeline_factory(fn_factory(|| {
async {
Ok::<_, S::InitError>(fn_service(|io: TcpStream| {
let peer_addr = io.peer_addr().ok();
ok::<_, DispatchError>((io, peer_addr))
}))
}
pipeline_factory(fn_factory(|| async {
Ok::<_, S::InitError>(fn_service(|io: TcpStream| {
let peer_addr = io.peer_addr().ok();
ok::<_, DispatchError>((io, peer_addr))
}))
}))
.and_then(self)
}
@ -246,7 +244,7 @@ where
}
/// `Service` implementation for http/2 transport
pub struct H2ServiceHandler<T, S, B> {
pub struct H2ServiceHandler<T, S: Service, B> {
srv: CloneableService<S>,
cfg: ServiceConfig,
on_connect: Option<rc::Rc<dyn Fn(&T) -> Box<dyn DataFactory>>>,

View File

@ -63,7 +63,7 @@ header! {
(AcceptCharset, ACCEPT_CHARSET) => (QualityItem<Charset>)+
test_accept_charset {
/// Test case from RFC
// Test case from RFC
test_header!(test1, vec![b"iso-8859-5, unicode-1-1;q=0.8"]);
}
}

View File

@ -90,40 +90,40 @@ pub enum DispositionParam {
/// [RFC6266](https://tools.ietf.org/html/rfc6266) as *token "=" value*. Recipients should
/// ignore unrecognizable parameters.
Unknown(String, String),
/// An unrecognized extended paramater as defined in
/// An unrecognized extended parameter as defined in
/// [RFC5987](https://tools.ietf.org/html/rfc5987) as *ext-parameter*, in
/// [RFC6266](https://tools.ietf.org/html/rfc6266) as *ext-token "=" ext-value*. The single
/// trailling asterisk is not included. Recipients should ignore unrecognizable parameters.
/// trailing asterisk is not included. Recipients should ignore unrecognizable parameters.
UnknownExt(String, ExtendedValue),
}
impl DispositionParam {
/// Returns `true` if the paramater is [`Name`](DispositionParam::Name).
/// Returns `true` if the parameter is [`Name`](DispositionParam::Name).
#[inline]
pub fn is_name(&self) -> bool {
self.as_name().is_some()
}
/// Returns `true` if the paramater is [`Filename`](DispositionParam::Filename).
/// Returns `true` if the parameter is [`Filename`](DispositionParam::Filename).
#[inline]
pub fn is_filename(&self) -> bool {
self.as_filename().is_some()
}
/// Returns `true` if the paramater is [`FilenameExt`](DispositionParam::FilenameExt).
/// Returns `true` if the parameter is [`FilenameExt`](DispositionParam::FilenameExt).
#[inline]
pub fn is_filename_ext(&self) -> bool {
self.as_filename_ext().is_some()
}
/// Returns `true` if the paramater is [`Unknown`](DispositionParam::Unknown) and the `name`
/// Returns `true` if the parameter is [`Unknown`](DispositionParam::Unknown) and the `name`
#[inline]
/// matches.
pub fn is_unknown<T: AsRef<str>>(&self, name: T) -> bool {
self.as_unknown(name).is_some()
}
/// Returns `true` if the paramater is [`UnknownExt`](DispositionParam::UnknownExt) and the
/// Returns `true` if the parameter is [`UnknownExt`](DispositionParam::UnknownExt) and the
/// `name` matches.
#[inline]
pub fn is_unknown_ext<T: AsRef<str>>(&self, name: T) -> bool {
@ -373,7 +373,7 @@ impl ContentDisposition {
let param = if param_name.eq_ignore_ascii_case("name") {
DispositionParam::Name(value)
} else if param_name.eq_ignore_ascii_case("filename") {
// See also comments in test_from_raw_uncessary_percent_decode.
// See also comments in test_from_raw_unnecessary_percent_decode.
DispositionParam::Filename(value)
} else {
DispositionParam::Unknown(param_name.to_owned(), value)
@ -423,7 +423,7 @@ impl ContentDisposition {
/// Return the value of *name* if exists.
pub fn get_name(&self) -> Option<&str> {
self.parameters.iter().filter_map(|p| p.as_name()).nth(0)
self.parameters.iter().filter_map(|p| p.as_name()).next()
}
/// Return the value of *filename* if exists.
@ -431,7 +431,7 @@ impl ContentDisposition {
self.parameters
.iter()
.filter_map(|p| p.as_filename())
.nth(0)
.next()
}
/// Return the value of *filename\** if exists.
@ -439,7 +439,7 @@ impl ContentDisposition {
self.parameters
.iter()
.filter_map(|p| p.as_filename_ext())
.nth(0)
.next()
}
/// Return the value of the parameter which the `name` matches.
@ -448,7 +448,7 @@ impl ContentDisposition {
self.parameters
.iter()
.filter_map(|p| p.as_unknown(name))
.nth(0)
.next()
}
/// Return the value of the extended parameter which the `name` matches.
@ -457,7 +457,7 @@ impl ContentDisposition {
self.parameters
.iter()
.filter_map(|p| p.as_unknown_ext(name))
.nth(0)
.next()
}
}
@ -529,7 +529,7 @@ impl fmt::Display for DispositionParam {
// ; to use within parameter values
//
//
// See also comments in test_from_raw_uncessary_percent_decode.
// See also comments in test_from_raw_unnecessary_percent_decode.
lazy_static! {
static ref RE: Regex = Regex::new("[\x00-\x08\x10-\x1F\x7F\"\\\\]").unwrap();
}
@ -676,7 +676,7 @@ mod tests {
fn test_from_raw_unordered() {
let a = HeaderValue::from_static(
"form-data; dummy=3; filename=\"sample.png\" ; name=upload;",
// Actually, a trailling semolocon is not compliant. But it is fine to accept.
// Actually, a trailing semicolon is not compliant. But it is fine to accept.
);
let a: ContentDisposition = ContentDisposition::from_raw(&a).unwrap();
let b = ContentDisposition {
@ -833,7 +833,7 @@ mod tests {
}
#[test]
fn test_from_raw_uncessary_percent_decode() {
fn test_from_raw_unnecessary_percent_decode() {
// In fact, RFC7578 (multipart/form-data) Section 2 and 4.2 suggests that filename with
// non-ASCII characters MAY be percent-encoded.
// On the contrary, RFC6266 or other RFCs related to Content-Disposition response header
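// Editor's note: a small usage sketch for the getters touched above; the header value is
// illustrative, the import path follows actix-http's `http::header` re-exports, and
// get_filename is assumed to be the filename counterpart of get_name.
use actix_http::http::header::{ContentDisposition, HeaderValue};

let raw = HeaderValue::from_static("form-data; name=upload; filename=\"sample.png\"");
let cd = ContentDisposition::from_raw(&raw).unwrap();
assert_eq!(cd.get_name(), Some("upload"));
assert_eq!(cd.get_filename(), Some("sample.png"));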

View File

@ -7,7 +7,7 @@ use header::{Header, Raw};
/// `Range` header, defined in [RFC7233](https://tools.ietf.org/html/rfc7233#section-3.1)
///
/// The "Range" header field on a GET request modifies the method
/// semantics to request transfer of only one or more subranges of the
/// semantics to request transfer of only one or more sub-ranges of the
/// selected representation data, rather than the entire selected
/// representation data.
///
@ -183,13 +183,13 @@ impl fmt::Display for Range {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
match *self {
Range::Bytes(ref ranges) => {
try!(write!(f, "bytes="));
write!(f, "bytes=")?;
for (i, range) in ranges.iter().enumerate() {
if i != 0 {
try!(f.write_str(","));
f.write_str(",")?;
}
try!(Display::fmt(range, f));
Display::fmt(range, f)?;
}
Ok(())
}
@ -214,9 +214,9 @@ impl FromStr for Range {
}
Ok(Range::Bytes(ranges))
}
(Some(unit), Some(range_str)) if unit != "" && range_str != "" => Ok(
Range::Unregistered(unit.to_owned(), range_str.to_owned()),
),
(Some(unit), Some(range_str)) if unit != "" && range_str != "" => {
Ok(Range::Unregistered(unit.to_owned(), range_str.to_owned()))
}
_ => Err(::Error::Header),
}
}
@ -229,7 +229,8 @@ impl FromStr for ByteRangeSpec {
let mut parts = s.splitn(2, '-');
match (parts.next(), parts.next()) {
(Some(""), Some(end)) => end.parse()
(Some(""), Some(end)) => end
.parse()
.or(Err(::Error::Header))
.map(ByteRangeSpec::Last),
(Some(start), Some("")) => start
@ -272,163 +273,138 @@ impl Header for Range {
}
}
#[test]
fn test_parse_bytes_range_valid() {
let r: Range = Header::parse_header(&"bytes=1-100".into()).unwrap();
let r2: Range = Header::parse_header(&"bytes=1-100,-".into()).unwrap();
let r3 = Range::bytes(1, 100);
assert_eq!(r, r2);
assert_eq!(r2, r3);
#[cfg(test)]
mod tests {
use super::*;
let r: Range = Header::parse_header(&"bytes=1-100,200-".into()).unwrap();
let r2: Range =
Header::parse_header(&"bytes= 1-100 , 101-xxx, 200- ".into()).unwrap();
let r3 = Range::Bytes(vec![
ByteRangeSpec::FromTo(1, 100),
ByteRangeSpec::AllFrom(200),
]);
assert_eq!(r, r2);
assert_eq!(r2, r3);
#[test]
fn test_parse_bytes_range_valid() {
let r: Range = Header::parse_header(&"bytes=1-100".into()).unwrap();
let r2: Range = Header::parse_header(&"bytes=1-100,-".into()).unwrap();
let r3 = Range::bytes(1, 100);
assert_eq!(r, r2);
assert_eq!(r2, r3);
let r: Range = Header::parse_header(&"bytes=1-100,-100".into()).unwrap();
let r2: Range = Header::parse_header(&"bytes=1-100, ,,-100".into()).unwrap();
let r3 = Range::Bytes(vec![
ByteRangeSpec::FromTo(1, 100),
ByteRangeSpec::Last(100),
]);
assert_eq!(r, r2);
assert_eq!(r2, r3);
let r: Range = Header::parse_header(&"bytes=1-100,200-".into()).unwrap();
let r2: Range =
Header::parse_header(&"bytes= 1-100 , 101-xxx, 200- ".into()).unwrap();
let r3 = Range::Bytes(vec![
ByteRangeSpec::FromTo(1, 100),
ByteRangeSpec::AllFrom(200),
]);
assert_eq!(r, r2);
assert_eq!(r2, r3);
let r: Range = Header::parse_header(&"custom=1-100,-100".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned());
assert_eq!(r, r2);
}
#[test]
fn test_parse_unregistered_range_valid() {
let r: Range = Header::parse_header(&"custom=1-100,-100".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned());
assert_eq!(r, r2);
let r: Range = Header::parse_header(&"custom=abcd".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "abcd".to_owned());
assert_eq!(r, r2);
let r: Range = Header::parse_header(&"custom=xxx-yyy".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "xxx-yyy".to_owned());
assert_eq!(r, r2);
}
#[test]
fn test_parse_invalid() {
let r: ::Result<Range> = Header::parse_header(&"bytes=1-a,-".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=1-2-3".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"abc".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=1-100=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"custom=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"=1-100".into());
assert_eq!(r.ok(), None);
}
#[test]
fn test_fmt() {
use header::Headers;
let mut headers = Headers::new();
headers.set(Range::Bytes(vec![
ByteRangeSpec::FromTo(0, 1000),
ByteRangeSpec::AllFrom(2000),
]));
assert_eq!(&headers.to_string(), "Range: bytes=0-1000,2000-\r\n");
headers.clear();
headers.set(Range::Bytes(vec![]));
assert_eq!(&headers.to_string(), "Range: bytes=\r\n");
headers.clear();
headers.set(Range::Unregistered(
"custom".to_owned(),
"1-xxx".to_owned(),
));
assert_eq!(&headers.to_string(), "Range: custom=1-xxx\r\n");
}
#[test]
fn test_byte_range_spec_to_satisfiable_range() {
assert_eq!(
Some((0, 0)),
ByteRangeSpec::FromTo(0, 0).to_satisfiable_range(3)
);
assert_eq!(
Some((1, 2)),
ByteRangeSpec::FromTo(1, 2).to_satisfiable_range(3)
);
assert_eq!(
Some((1, 2)),
ByteRangeSpec::FromTo(1, 5).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::FromTo(3, 3).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::FromTo(2, 1).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::FromTo(0, 0).to_satisfiable_range(0)
);
assert_eq!(
Some((0, 2)),
ByteRangeSpec::AllFrom(0).to_satisfiable_range(3)
);
assert_eq!(
Some((2, 2)),
ByteRangeSpec::AllFrom(2).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::AllFrom(3).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::AllFrom(5).to_satisfiable_range(3)
);
assert_eq!(
None,
ByteRangeSpec::AllFrom(0).to_satisfiable_range(0)
);
assert_eq!(
Some((1, 2)),
ByteRangeSpec::Last(2).to_satisfiable_range(3)
);
assert_eq!(
Some((2, 2)),
ByteRangeSpec::Last(1).to_satisfiable_range(3)
);
assert_eq!(
Some((0, 2)),
ByteRangeSpec::Last(5).to_satisfiable_range(3)
);
assert_eq!(None, ByteRangeSpec::Last(0).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::Last(2).to_satisfiable_range(0));
let r: Range = Header::parse_header(&"bytes=1-100,-100".into()).unwrap();
let r2: Range = Header::parse_header(&"bytes=1-100, ,,-100".into()).unwrap();
let r3 = Range::Bytes(vec![
ByteRangeSpec::FromTo(1, 100),
ByteRangeSpec::Last(100),
]);
assert_eq!(r, r2);
assert_eq!(r2, r3);
let r: Range = Header::parse_header(&"custom=1-100,-100".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned());
assert_eq!(r, r2);
}
#[test]
fn test_parse_unregistered_range_valid() {
let r: Range = Header::parse_header(&"custom=1-100,-100".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "1-100,-100".to_owned());
assert_eq!(r, r2);
let r: Range = Header::parse_header(&"custom=abcd".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "abcd".to_owned());
assert_eq!(r, r2);
let r: Range = Header::parse_header(&"custom=xxx-yyy".into()).unwrap();
let r2 = Range::Unregistered("custom".to_owned(), "xxx-yyy".to_owned());
assert_eq!(r, r2);
}
#[test]
fn test_parse_invalid() {
let r: ::Result<Range> = Header::parse_header(&"bytes=1-a,-".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=1-2-3".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"abc".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=1-100=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"bytes=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"custom=".into());
assert_eq!(r.ok(), None);
let r: ::Result<Range> = Header::parse_header(&"=1-100".into());
assert_eq!(r.ok(), None);
}
#[test]
fn test_fmt() {
use header::Headers;
let mut headers = Headers::new();
headers.set(Range::Bytes(vec![
ByteRangeSpec::FromTo(0, 1000),
ByteRangeSpec::AllFrom(2000),
]));
assert_eq!(&headers.to_string(), "Range: bytes=0-1000,2000-\r\n");
headers.clear();
headers.set(Range::Bytes(vec![]));
assert_eq!(&headers.to_string(), "Range: bytes=\r\n");
headers.clear();
headers.set(Range::Unregistered("custom".to_owned(), "1-xxx".to_owned()));
assert_eq!(&headers.to_string(), "Range: custom=1-xxx\r\n");
}
#[test]
fn test_byte_range_spec_to_satisfiable_range() {
assert_eq!(
Some((0, 0)),
ByteRangeSpec::FromTo(0, 0).to_satisfiable_range(3)
);
assert_eq!(
Some((1, 2)),
ByteRangeSpec::FromTo(1, 2).to_satisfiable_range(3)
);
assert_eq!(
Some((1, 2)),
ByteRangeSpec::FromTo(1, 5).to_satisfiable_range(3)
);
assert_eq!(None, ByteRangeSpec::FromTo(3, 3).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::FromTo(2, 1).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::FromTo(0, 0).to_satisfiable_range(0));
assert_eq!(
Some((0, 2)),
ByteRangeSpec::AllFrom(0).to_satisfiable_range(3)
);
assert_eq!(
Some((2, 2)),
ByteRangeSpec::AllFrom(2).to_satisfiable_range(3)
);
assert_eq!(None, ByteRangeSpec::AllFrom(3).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::AllFrom(5).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::AllFrom(0).to_satisfiable_range(0));
assert_eq!(Some((1, 2)), ByteRangeSpec::Last(2).to_satisfiable_range(3));
assert_eq!(Some((2, 2)), ByteRangeSpec::Last(1).to_satisfiable_range(3));
assert_eq!(Some((0, 2)), ByteRangeSpec::Last(5).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::Last(0).to_satisfiable_range(3));
assert_eq!(None, ByteRangeSpec::Last(2).to_satisfiable_range(0));
}
}

View File

@ -7,7 +7,7 @@ use http::header::{HeaderName, HeaderValue};
/// A set of HTTP headers
///
/// `HeaderMap` is an multimap of [`HeaderName`] to values.
/// `HeaderMap` is an multi-map of [`HeaderName`] to values.
///
/// [`HeaderName`]: struct.HeaderName.html
#[derive(Debug, Clone)]

View File

@ -137,17 +137,22 @@ impl FromStr for Charset {
}
}
#[test]
fn test_parse() {
assert_eq!(Us_Ascii, "us-ascii".parse().unwrap());
assert_eq!(Us_Ascii, "US-Ascii".parse().unwrap());
assert_eq!(Us_Ascii, "US-ASCII".parse().unwrap());
assert_eq!(Shift_Jis, "Shift-JIS".parse().unwrap());
assert_eq!(Ext("ABCD".to_owned()), "abcd".parse().unwrap());
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_display() {
assert_eq!("US-ASCII", format!("{}", Us_Ascii));
assert_eq!("ABCD", format!("{}", Ext("ABCD".to_owned())));
#[test]
fn test_parse() {
assert_eq!(Us_Ascii, "us-ascii".parse().unwrap());
assert_eq!(Us_Ascii, "US-Ascii".parse().unwrap());
assert_eq!(Us_Ascii, "US-ASCII".parse().unwrap());
assert_eq!(Shift_Jis, "Shift-JIS".parse().unwrap());
assert_eq!(Ext("ABCD".to_owned()), "abcd".parse().unwrap());
}
#[test]
fn test_display() {
assert_eq!("US-ASCII", format!("{}", Us_Ascii));
assert_eq!("ABCD", format!("{}", Ext("ABCD".to_owned())));
}
}

View File

@ -1,59 +1,46 @@
use std::fmt::{self, Display};
use std::io::Write;
use std::str::FromStr;
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use std::time::{SystemTime, UNIX_EPOCH};
use bytes::{buf::BufMutExt, BytesMut};
use http::header::{HeaderValue, InvalidHeaderValue};
use time::{offset, OffsetDateTime, PrimitiveDateTime};
use crate::error::ParseError;
use crate::header::IntoHeaderValue;
use crate::time_parser;
/// A timestamp with HTTP formatting and parsing
#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord)]
pub struct HttpDate(time::Tm);
pub struct HttpDate(OffsetDateTime);
impl FromStr for HttpDate {
type Err = ParseError;
fn from_str(s: &str) -> Result<HttpDate, ParseError> {
match time::strptime(s, "%a, %d %b %Y %T %Z")
.or_else(|_| time::strptime(s, "%A, %d-%b-%y %T %Z"))
.or_else(|_| time::strptime(s, "%c"))
{
Ok(t) => Ok(HttpDate(t)),
Err(_) => Err(ParseError::Header),
match time_parser::parse_http_date(s) {
Some(t) => Ok(HttpDate(t.assume_utc())),
None => Err(ParseError::Header),
}
}
}
impl Display for HttpDate {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt::Display::fmt(&self.0.to_utc().rfc822(), f)
fmt::Display::fmt(&self.0.format("%a, %d %b %Y %H:%M:%S GMT"), f)
}
}
impl From<time::Tm> for HttpDate {
fn from(tm: time::Tm) -> HttpDate {
HttpDate(tm)
impl From<OffsetDateTime> for HttpDate {
fn from(dt: OffsetDateTime) -> HttpDate {
HttpDate(dt)
}
}
impl From<SystemTime> for HttpDate {
fn from(sys: SystemTime) -> HttpDate {
let tmspec = match sys.duration_since(UNIX_EPOCH) {
Ok(dur) => {
time::Timespec::new(dur.as_secs() as i64, dur.subsec_nanos() as i32)
}
Err(err) => {
let neg = err.duration();
time::Timespec::new(
-(neg.as_secs() as i64),
-(neg.subsec_nanos() as i32),
)
}
};
HttpDate(time::at_utc(tmspec))
HttpDate(PrimitiveDateTime::from(sys).assume_utc())
}
}
@ -62,56 +49,51 @@ impl IntoHeaderValue for HttpDate {
fn try_into(self) -> Result<HeaderValue, Self::Error> {
let mut wrt = BytesMut::with_capacity(29).writer();
write!(wrt, "{}", self.0.rfc822()).unwrap();
write!(
wrt,
"{}",
self.0
.to_offset(offset!(UTC))
.format("%a, %d %b %Y %H:%M:%S GMT")
)
.unwrap();
HeaderValue::from_maybe_shared(wrt.get_mut().split().freeze())
}
}
impl From<HttpDate> for SystemTime {
fn from(date: HttpDate) -> SystemTime {
let spec = date.0.to_timespec();
if spec.sec >= 0 {
UNIX_EPOCH + Duration::new(spec.sec as u64, spec.nsec as u32)
} else {
UNIX_EPOCH - Duration::new(spec.sec as u64, spec.nsec as u32)
}
let dt = date.0;
let epoch = OffsetDateTime::unix_epoch();
UNIX_EPOCH + (dt - epoch)
}
}
#[cfg(test)]
mod tests {
use super::HttpDate;
use time::Tm;
const NOV_07: HttpDate = HttpDate(Tm {
tm_nsec: 0,
tm_sec: 37,
tm_min: 48,
tm_hour: 8,
tm_mday: 7,
tm_mon: 10,
tm_year: 94,
tm_wday: 0,
tm_isdst: 0,
tm_yday: 0,
tm_utcoff: 0,
});
use time::{date, time, PrimitiveDateTime};
#[test]
fn test_date() {
let nov_07 = HttpDate(
PrimitiveDateTime::new(date!(1994 - 11 - 07), time!(8:48:37)).assume_utc(),
);
assert_eq!(
"Sun, 07 Nov 1994 08:48:37 GMT".parse::<HttpDate>().unwrap(),
NOV_07
nov_07
);
assert_eq!(
"Sunday, 07-Nov-94 08:48:37 GMT"
.parse::<HttpDate>()
.unwrap(),
NOV_07
nov_07
);
assert_eq!(
"Sun Nov 7 08:48:37 1994".parse::<HttpDate>().unwrap(),
NOV_07
nov_07
);
assert!("this-is-no-date".parse::<HttpDate>().is_err());
}

View File

@ -1,172 +1,130 @@
use std::{io, mem, ptr, slice};
use std::io;
use bytes::{BufMut, BytesMut};
use http::Version;
use crate::extensions::Extensions;
const DEC_DIGITS_LUT: &[u8] = b"0001020304050607080910111213141516171819\
2021222324252627282930313233343536373839\
4041424344454647484950515253545556575859\
6061626364656667686970717273747576777879\
8081828384858687888990919293949596979899";
const DIGITS_START: u8 = b'0';
pub(crate) const STATUS_LINE_BUF_SIZE: usize = 13;
pub(crate) fn write_status_line(version: Version, mut n: u16, bytes: &mut BytesMut) {
let mut buf: [u8; STATUS_LINE_BUF_SIZE] = [
b'H', b'T', b'T', b'P', b'/', b'1', b'.', b'1', b' ', b' ', b' ', b' ', b' ',
];
pub(crate) fn write_status_line(version: Version, n: u16, bytes: &mut BytesMut) {
match version {
Version::HTTP_2 => buf[5] = b'2',
Version::HTTP_10 => buf[7] = b'0',
Version::HTTP_09 => {
buf[5] = b'0';
buf[7] = b'9';
}
_ => (),
}
let mut curr: isize = 12;
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
let four = n > 999;
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1 as isize), buf_ptr.offset(curr), 2);
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(
lut_ptr.offset(d1 as isize),
buf_ptr.offset(curr),
2,
);
Version::HTTP_11 => bytes.put_slice(b"HTTP/1.1 "),
Version::HTTP_10 => bytes.put_slice(b"HTTP/1.0 "),
Version::HTTP_09 => bytes.put_slice(b"HTTP/0.9 "),
_ => {
// other HTTP version handlers do not use this method
}
}
bytes.put_slice(&buf);
if four {
bytes.put_u8(b' ');
}
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
// trailing space before reason
bytes.put_u8(b' ');
}
/// NOTE: bytes object has to contain enough space
pub fn write_content_length(mut n: usize, bytes: &mut BytesMut) {
pub fn write_content_length(n: usize, bytes: &mut BytesMut) {
bytes.put_slice(b"\r\ncontent-length: ");
if n < 10 {
let mut buf: [u8; 21] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l', b'e',
b'n', b'g', b't', b'h', b':', b' ', b'0', b'\r', b'\n',
];
buf[18] = (n as u8) + b'0';
bytes.put_slice(&buf);
bytes.put_u8(DIGITS_START + (n as u8));
} else if n < 100 {
let mut buf: [u8; 22] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l', b'e',
b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'\r', b'\n',
];
let d1 = n << 1;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(18),
2,
);
}
bytes.put_slice(&buf);
let n = n as u8;
let d10 = n / 10;
let d1 = n % 10;
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1000 {
let mut buf: [u8; 23] = [
b'\r', b'\n', b'c', b'o', b'n', b't', b'e', b'n', b't', b'-', b'l', b'e',
b'n', b'g', b't', b'h', b':', b' ', b'0', b'0', b'0', b'\r', b'\n',
];
// decode 2 more chars, if > 2 chars
let d1 = (n % 100) << 1;
n /= 100;
unsafe {
ptr::copy_nonoverlapping(
DEC_DIGITS_LUT.as_ptr().add(d1),
buf.as_mut_ptr().offset(19),
2,
)
};
let n = n as u16;
// decode last 1
buf[18] = (n as u8) + b'0';
let d100 = (n / 100) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_slice(&buf);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 10_000 {
let n = n as u16;
let d1000 = (n / 1000) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 100_000 {
let n = n as u32;
let d10000 = (n / 10000) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else if n < 1_000_000 {
let n = n as u32;
let d100000 = (n / 100_000) as u8;
let d10000 = ((n / 10000) % 10) as u8;
let d1000 = ((n / 1000) % 10) as u8;
let d100 = ((n / 100) % 10) as u8;
let d10 = ((n / 10) % 10) as u8;
let d1 = (n % 10) as u8;
bytes.put_u8(DIGITS_START + d100000);
bytes.put_u8(DIGITS_START + d10000);
bytes.put_u8(DIGITS_START + d1000);
bytes.put_u8(DIGITS_START + d100);
bytes.put_u8(DIGITS_START + d10);
bytes.put_u8(DIGITS_START + d1);
} else {
bytes.put_slice(b"\r\ncontent-length: ");
convert_usize(n, bytes);
write_usize(n, bytes);
}
bytes.put_slice(b"\r\n");
}
pub(crate) fn convert_usize(mut n: usize, bytes: &mut BytesMut) {
let mut curr: isize = 39;
let mut buf: [u8; 41] = unsafe { mem::MaybeUninit::uninit().assume_init() };
buf[39] = b'\r';
buf[40] = b'\n';
let buf_ptr = buf.as_mut_ptr();
let lut_ptr = DEC_DIGITS_LUT.as_ptr();
pub(crate) fn write_usize(n: usize, bytes: &mut BytesMut) {
let mut n = n;
// eagerly decode 4 characters at a time
while n >= 10_000 {
let rem = (n % 10_000) as isize;
n /= 10_000;
// 20 chars is max length of a usize (2^64)
// digits will be added to the buffer from lsd to msd
let mut buf = BytesMut::with_capacity(20);
let d1 = (rem / 100) << 1;
let d2 = (rem % 100) << 1;
curr -= 4;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
ptr::copy_nonoverlapping(lut_ptr.offset(d2), buf_ptr.offset(curr + 2), 2);
}
while n > 9 {
// "pop" the least-significant digit
let lsd = (n % 10) as u8;
// remove the lsd from n
n /= 10;
buf.put_u8(DIGITS_START + lsd);
}
// if we reach here numbers are <= 9999, so at most 4 chars long
let mut n = n as isize; // possibly reduce 64bit math
// put msd to result buffer
bytes.put_u8(DIGITS_START + (n as u8));
// decode 2 more chars, if > 2 chars
if n >= 100 {
let d1 = (n % 100) << 1;
n /= 100;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
// decode last 1 or 2 chars
if n < 10 {
curr -= 1;
unsafe {
*buf_ptr.offset(curr) = (n as u8) + b'0';
}
} else {
let d1 = n << 1;
curr -= 2;
unsafe {
ptr::copy_nonoverlapping(lut_ptr.offset(d1), buf_ptr.offset(curr), 2);
}
}
unsafe {
bytes.extend_from_slice(slice::from_raw_parts(
buf_ptr.offset(curr),
41 - curr as usize,
));
// put, in reverse (msd to lsd), remaining digits to buffer
for i in (0..buf.len()).rev() {
bytes.put_u8(buf[i]);
}
}
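
The unsafe pointer arithmetic above is replaced by a plain digit-reversal loop. A standalone sketch of the same idea, for illustration only (names are not from the crate):

fn usize_to_ascii(mut n: usize, out: &mut Vec<u8>) {
    // scratch collects digits least-significant first; 20 covers u64::MAX
    let mut scratch = Vec::with_capacity(20);
    while n > 9 {
        scratch.push(b'0' + (n % 10) as u8);
        n /= 10;
    }
    // the most-significant digit goes straight to the output...
    out.push(b'0' + n as u8);
    // ...followed by the remaining digits in reverse (msd to lsd)
    for &digit in scratch.iter().rev() {
        out.push(digit);
    }
}

fn main() {
    let mut out = Vec::new();
    usize_to_ascii(59_094_718, &mut out);
    assert_eq!(out, b"59094718".to_vec());
}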
@ -196,8 +154,28 @@ impl<T: Clone + 'static> DataFactory for Data<T> {
#[cfg(test)]
mod tests {
use std::str::from_utf8;
use super::*;
#[test]
fn test_status_line() {
let mut bytes = BytesMut::new();
bytes.reserve(50);
write_status_line(Version::HTTP_11, 200, &mut bytes);
assert_eq!(from_utf8(&bytes.split().freeze()).unwrap(), "HTTP/1.1 200 ");
let mut bytes = BytesMut::new();
bytes.reserve(50);
write_status_line(Version::HTTP_09, 404, &mut bytes);
assert_eq!(from_utf8(&bytes.split().freeze()).unwrap(), "HTTP/0.9 404 ");
let mut bytes = BytesMut::new();
bytes.reserve(50);
write_status_line(Version::HTTP_09, 515, &mut bytes);
assert_eq!(from_utf8(&bytes.split().freeze()).unwrap(), "HTTP/0.9 515 ");
}
#[test]
fn test_write_content_length() {
let mut bytes = BytesMut::new();
@ -231,5 +209,48 @@ mod tests {
bytes.reserve(50);
write_content_length(5909, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 5909\r\n"[..]);
bytes.reserve(50);
write_content_length(9999, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 9999\r\n"[..]);
bytes.reserve(50);
write_content_length(10001, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 10001\r\n"[..]);
bytes.reserve(50);
write_content_length(59094, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 59094\r\n"[..]);
bytes.reserve(50);
write_content_length(99999, &mut bytes);
assert_eq!(bytes.split().freeze(), b"\r\ncontent-length: 99999\r\n"[..]);
bytes.reserve(50);
write_content_length(590947, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 590947\r\n"[..]
);
bytes.reserve(50);
write_content_length(999999, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 999999\r\n"[..]
);
bytes.reserve(50);
write_content_length(5909471, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 5909471\r\n"[..]
);
bytes.reserve(50);
write_content_length(59094718, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 59094718\r\n"[..]
);
bytes.reserve(50);
write_content_length(4294973728, &mut bytes);
assert_eq!(
bytes.split().freeze(),
b"\r\ncontent-length: 4294973728\r\n"[..]
);
}
}

View File

@ -1,5 +1,5 @@
//! Basic http primitives for actix-net framework.
#![deny(rust_2018_idioms, warnings)]
#![warn(rust_2018_idioms, warnings)]
#![allow(
clippy::type_complexity,
clippy::too_many_arguments,
@ -10,6 +10,9 @@
#[macro_use]
extern crate log;
#[macro_use]
mod macros;
pub mod body;
mod builder;
pub mod client;
@ -27,6 +30,7 @@ mod payload;
mod request;
mod response;
mod service;
mod time_parser;
pub mod cookie;
pub mod error;

actix-http/src/macros.rs (new file, 95 lines added)
View File

@ -0,0 +1,95 @@
#[macro_export]
macro_rules! downcast_get_type_id {
() => {
/// A helper method to get the type ID of the type
/// this trait is implemented on.
/// This method is unsafe to *implement*, since `downcast_ref` relies
/// on the returned `TypeId` to perform a cast.
///
/// Unfortunately, Rust has no notion of a trait method that is
/// unsafe to implement (marking it as `unsafe` makes it unsafe
/// to *call*). As a workaround, we require this method
/// to return a private type along with the `TypeId`. This
/// private type (`PrivateHelper`) has a private constructor,
/// making it impossible for safe code to construct outside of
/// this module. This ensures that safe code cannot violate
/// type-safety by implementing this method.
#[doc(hidden)]
fn __private_get_type_id__(&self) -> (std::any::TypeId, PrivateHelper)
where
Self: 'static,
{
(std::any::TypeId::of::<Self>(), PrivateHelper(()))
}
};
}
// Generate implementation for `dyn $name`
#[macro_export]
macro_rules! downcast {
($name:ident) => {
/// A struct with a private constructor, for use with
/// `__private_get_type_id__`. Its single field is private,
/// ensuring that it can only be constructed from this module
#[doc(hidden)]
pub struct PrivateHelper(());
impl dyn $name + 'static {
/// Downcasts generic body to a specific type.
pub fn downcast_ref<T: $name + 'static>(&self) -> Option<&T> {
if self.__private_get_type_id__().0 == std::any::TypeId::of::<T>() {
// Safety: external crates cannot override the default
// implementation of `__private_get_type_id__`, since
// it requires returning a private type. We can therefore
// rely on the returned `TypeId`, which ensures that this
// case is correct.
unsafe { Some(&*(self as *const dyn $name as *const T)) }
} else {
None
}
}
/// Downcasts a generic body to a mutable specific type.
pub fn downcast_mut<T: $name + 'static>(&mut self) -> Option<&mut T> {
if self.__private_get_type_id__().0 == std::any::TypeId::of::<T>() {
// Safety: external crates cannot override the default
// implementation of `__private_get_type_id__`, since
// it requires returning a private type. We can therefore
// rely on the returned `TypeId`, which ensures that this
// case is correct.
unsafe {
Some(&mut *(self as *const dyn $name as *const T as *mut T))
}
} else {
None
}
}
}
};
}
#[cfg(test)]
mod tests {
trait MB {
downcast_get_type_id!();
}
downcast!(MB);
impl MB for String {}
impl MB for () {}
#[actix_rt::test]
async fn test_any_casting() {
let mut body = String::from("hello cast");
let resp_body: &mut dyn MB = &mut body;
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast");
let body = &mut resp_body.downcast_mut::<String>().unwrap();
body.push_str("!");
let body = resp_body.downcast_ref::<String>().unwrap();
assert_eq!(body, "hello cast!");
let not_body = resp_body.downcast_ref::<()>();
assert!(not_body.is_none());
}
}

View File

@ -99,13 +99,13 @@ impl RequestHead {
}
/// Is to uppercase headers with Camel-Case.
/// Befault is `false`
/// Default is `false`
#[inline]
pub fn camel_case_headers(&self) -> bool {
self.flags.contains(Flags::CAMEL_CASE)
}
/// Set `true` to send headers which are uppercased with Camel-Case.
/// Set `true` to send headers which are formatted as Camel-Case.
#[inline]
pub fn set_camel_case_headers(&mut self, val: bool) {
if val {

View File

@ -9,7 +9,6 @@ use std::{fmt, str};
use bytes::{Bytes, BytesMut};
use futures_core::Stream;
use serde::Serialize;
use serde_json;
use crate::body::{Body, BodyStream, MessageBody, ResponseBody};
use crate::cookie::{Cookie, CookieJar};
@ -637,7 +636,7 @@ impl ResponseBuilder {
/// `ResponseBuilder` can not be used after this call.
pub fn streaming<S, E>(&mut self, stream: S) -> Response
where
S: Stream<Item = Result<Bytes, E>> + 'static,
S: Stream<Item = Result<Bytes, E>> + Unpin + 'static,
E: Into<Error> + 'static,
{
self.body(Body::from_message(BodyStream::new(stream)))

View File

@ -443,7 +443,7 @@ where
}
/// `Service` implementation for http transport
pub struct HttpServiceHandler<T, S, B, X, U> {
pub struct HttpServiceHandler<T, S: Service, B, X: Service, U: Service> {
srv: CloneableService<S>,
expect: CloneableService<X>,
upgrade: Option<CloneableService<U>>,

View File

@ -0,0 +1,42 @@
use time::{Date, OffsetDateTime, PrimitiveDateTime};
/// Attempt to parse a `time` string as one of either RFC 1123, RFC 850, or asctime.
pub fn parse_http_date(time: &str) -> Option<PrimitiveDateTime> {
try_parse_rfc_1123(time)
.or_else(|| try_parse_rfc_850(time))
.or_else(|| try_parse_asctime(time))
}
/// Attempt to parse a `time` string as a RFC 1123 formatted date time string.
fn try_parse_rfc_1123(time: &str) -> Option<PrimitiveDateTime> {
time::parse(time, "%a, %d %b %Y %H:%M:%S").ok()
}
/// Attempt to parse a `time` string as a RFC 850 formatted date time string.
fn try_parse_rfc_850(time: &str) -> Option<PrimitiveDateTime> {
match PrimitiveDateTime::parse(time, "%A, %d-%b-%y %H:%M:%S") {
Ok(dt) => {
// If the `time` string contains a two-digit year, then as per RFC 2616 § 19.3,
// we consider the year as part of this century if it's within the next 50 years,
// otherwise we consider it part of the previous century.
let now = OffsetDateTime::now_utc();
let century_start_year = (now.year() / 100) * 100;
let mut expanded_year = century_start_year + dt.year();
if expanded_year > now.year() + 50 {
expanded_year -= 100;
}
match Date::try_from_ymd(expanded_year, dt.month(), dt.day()) {
Ok(date) => Some(PrimitiveDateTime::new(date, dt.time())),
Err(_) => None,
}
}
Err(_) => None,
}
}
/// Attempt to parse a `time` string using ANSI C's `asctime` format.
fn try_parse_asctime(time: &str) -> Option<PrimitiveDateTime> {
time::parse(time, "%a %b %_d %H:%M:%S %Y").ok()
}
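
The two-digit-year rule above is easiest to see with concrete numbers. A worked sketch of the expansion step, with the current year passed in explicitly (helper name is illustrative, not from the crate):

fn expand_two_digit_year(two_digit: i32, now_year: i32) -> i32 {
    let century_start = (now_year / 100) * 100;
    let mut expanded = century_start + two_digit;
    // more than 50 years into the future => assume the previous century
    if expanded > now_year + 50 {
        expanded -= 100;
    }
    expanded
}

fn main() {
    // with "now" in 2020: 2000 + 94 = 2094 is past 2070, so roll back to 1994
    assert_eq!(expand_two_digit_year(94, 2020), 1994);
    // 2000 + 30 = 2030 falls within the next 50 years, so it stays put
    assert_eq!(expand_two_digit_year(30, 2020), 2030);
}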

View File

@ -137,7 +137,7 @@ impl Encoder for Codec {
Parser::write_message(
dst,
&data[..],
OpCode::Binary,
OpCode::Text,
false,
!self.flags.contains(Flags::SERVER),
)
@ -151,7 +151,7 @@ impl Encoder for Codec {
Parser::write_message(
dst,
&data[..],
OpCode::Text,
OpCode::Binary,
false,
!self.flags.contains(Flags::SERVER),
)
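
The two hunks above swap a transposed pair of opcodes so that text and binary websocket frames are written with their matching opcode. A reduced sketch of the intended pairing (simplified stand-in types, not the codec's actual internals):

enum Message {
    Text(String),
    Binary(Vec<u8>),
}

#[derive(Debug, PartialEq)]
enum OpCode {
    Text,
    Binary,
}

fn opcode_for(msg: &Message) -> OpCode {
    match msg {
        // the fix restores the natural pairing: text frames carry the Text
        // opcode and binary frames carry the Binary opcode
        Message::Text(_) => OpCode::Text,
        Message::Binary(_) => OpCode::Binary,
    }
}

fn main() {
    assert_eq!(opcode_for(&Message::Text("hi".into())), OpCode::Text);
    assert_eq!(opcode_for(&Message::Binary(vec![1, 2])), OpCode::Binary);
}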

View File

@ -2,7 +2,6 @@ use std::convert::TryFrom;
use bytes::{Buf, BufMut, BytesMut};
use log::debug;
use rand;
use crate::ws::mask::apply_mask;
use crate::ws::proto::{CloseCode, CloseReason, OpCode};

View File

@ -58,6 +58,8 @@ pub enum ProtocolError {
Io(io::Error),
}
impl std::error::Error for ProtocolError {}
impl ResponseError for ProtocolError {}
/// Websocket handshake errors
@ -108,7 +110,7 @@ impl ResponseError for HandshakeError {
}
}
/// Verify `WebSocket` handshake request and create handshake reponse.
/// Verify `WebSocket` handshake request and create handshake response.
// /// `protocols` is a sequence of known protocols. On successful handshake,
// /// the returned response headers contain the first protocol in this list
// /// which the server also knows.
@ -168,7 +170,7 @@ pub fn verify_handshake(req: &RequestHead) -> Result<(), HandshakeError> {
Ok(())
}
/// Create websocket's handshake response
/// Create websocket handshake response
///
/// This function returns handshake `Response`, ready to send to peer.
pub fn handshake_response(req: &RequestHead) -> ResponseBuilder {

View File

@ -1,5 +1,3 @@
use base64;
use sha1;
use std::convert::{From, Into};
use std::fmt;
@ -205,14 +203,15 @@ impl<T: Into<String>> From<(CloseCode, T)> for CloseReason {
static WS_GUID: &str = "258EAFA5-E914-47DA-95CA-C5AB0DC85B11";
// TODO: hash is always same size, we dont need String
// TODO: hash is always same size, we don't need String
pub fn hash_key(key: &[u8]) -> String {
use sha1::Digest;
let mut hasher = sha1::Sha1::new();
hasher.update(key);
hasher.update(WS_GUID.as_bytes());
hasher.input(key);
hasher.input(WS_GUID.as_bytes());
base64::encode(&hasher.digest().bytes())
base64::encode(hasher.result().as_ref())
}
#[cfg(test)]
@ -277,6 +276,12 @@ mod test {
assert_eq!(format!("{}", OpCode::Bad), "BAD");
}
#[test]
fn test_hash_key() {
let hash = hash_key(b"hello actix-web");
assert_eq!(&hash, "cR1dlyUUJKp0s/Bel25u5TgvC3E=");
}
#[test]
fn closecode_from_u16() {
assert_eq!(CloseCode::from(1000u16), CloseCode::Normal);

View File

@ -33,7 +33,8 @@ async fn test_h1_v2() {
HttpService::build()
.finish(|_| future::ok::<_, ()>(Response::Ok().body(STR)))
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
@ -61,7 +62,8 @@ async fn test_connection_close() {
.finish(|_| ok::<_, ()>(Response::Ok().body(STR)))
.tcp()
.map(|_| ())
});
})
.await;
let response = srv.get("/").force_close().send().await.unwrap();
assert!(response.status().is_success());
@ -80,7 +82,8 @@ async fn test_with_query_parameter() {
})
.tcp()
.map(|_| ())
});
})
.await;
let request = srv.request(http::Method::GET, srv.url("/?qp=5"));
let response = request.send().await.unwrap();
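
Most of the remaining test hunks make the same mechanical change: `test_server` now yields the server asynchronously, so every call site gains a trailing `.await`. A condensed sketch of the updated pattern, with imports assumed as in the tests above:

#[actix_rt::test]
async fn sketch_updated_test_server_usage() {
    let mut srv = test_server(|| {
        HttpService::build()
            .h1(|_| future::ok::<_, ()>(Response::Ok().finish()))
            .tcp()
    })
    .await; // previously the server was returned directly, without awaiting

    let response = srv.get("/").send().await.unwrap();
    assert!(response.status().is_success());
}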

View File

@ -67,7 +67,8 @@ async fn test_h2() -> io::Result<()> {
.h2(|_| ok::<_, Error>(Response::Ok().finish()))
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -85,7 +86,8 @@ async fn test_h2_1() -> io::Result<()> {
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -97,15 +99,14 @@ async fn test_h2_body() -> io::Result<()> {
let data = "HELLOWORLD".to_owned().repeat(64 * 1024);
let mut srv = test_server(move || {
HttpService::build()
.h2(|mut req: Request<_>| {
async move {
let body = load_body(req.take_payload()).await?;
Ok::<_, Error>(Response::Ok().body(body))
}
.h2(|mut req: Request<_>| async move {
let body = load_body(req.take_payload()).await?;
Ok::<_, Error>(Response::Ok().body(body))
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send_body(data.clone()).await.unwrap();
assert!(response.status().is_success());
@ -133,7 +134,8 @@ async fn test_h2_content_length() {
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let header = HeaderName::from_static("content-length");
let value = HeaderValue::from_static("0");
@ -194,7 +196,7 @@ async fn test_h2_headers() {
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
}).await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -233,7 +235,8 @@ async fn test_h2_body2() {
.h2(|_| ok::<_, ()>(Response::Ok().body(STR)))
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -250,7 +253,8 @@ async fn test_h2_head_empty() {
.finish(|_| ok::<_, ()>(Response::Ok().body(STR)))
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.shead("/").send().await.unwrap();
assert!(response.status().is_success());
@ -275,7 +279,8 @@ async fn test_h2_head_binary() {
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.shead("/").send().await.unwrap();
assert!(response.status().is_success());
@ -297,7 +302,8 @@ async fn test_h2_head_binary2() {
.h2(|_| ok::<_, ()>(Response::Ok().body(STR)))
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.shead("/").send().await.unwrap();
assert!(response.status().is_success());
@ -320,7 +326,8 @@ async fn test_h2_body_length() {
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -344,7 +351,8 @@ async fn test_h2_body_chunked_explicit() {
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -371,7 +379,8 @@ async fn test_h2_response_http_error_handling() {
}))
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert_eq!(response.status(), StatusCode::INTERNAL_SERVER_ERROR);
@ -388,7 +397,8 @@ async fn test_h2_service_error() {
.h2(|_| err::<Response, Error>(ErrorBadRequest("error")))
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert_eq!(response.status(), StatusCode::BAD_REQUEST);
@ -409,7 +419,8 @@ async fn test_h2_on_connect() {
})
.openssl(ssl_acceptor())
.map_err(|_| ())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());

View File

@ -45,7 +45,8 @@ async fn test_h1() -> io::Result<()> {
HttpService::build()
.h1(|_| future::ok::<_, Error>(Response::Ok().finish()))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -58,7 +59,8 @@ async fn test_h2() -> io::Result<()> {
HttpService::build()
.h2(|_| future::ok::<_, Error>(Response::Ok().finish()))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -75,7 +77,8 @@ async fn test_h1_1() -> io::Result<()> {
future::ok::<_, Error>(Response::Ok().finish())
})
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -92,7 +95,8 @@ async fn test_h2_1() -> io::Result<()> {
future::ok::<_, Error>(Response::Ok().finish())
})
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -104,14 +108,13 @@ async fn test_h2_body1() -> io::Result<()> {
let data = "HELLOWORLD".to_owned().repeat(64 * 1024);
let mut srv = test_server(move || {
HttpService::build()
.h2(|mut req: Request<_>| {
async move {
let body = load_body(req.take_payload()).await?;
Ok::<_, Error>(Response::Ok().body(body))
}
.h2(|mut req: Request<_>| async move {
let body = load_body(req.take_payload()).await?;
Ok::<_, Error>(Response::Ok().body(body))
})
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send_body(data.clone()).await.unwrap();
assert!(response.status().is_success());
@ -138,7 +141,8 @@ async fn test_h2_content_length() {
future::ok::<_, ()>(Response::new(statuses[indx]))
})
.rustls(ssl_acceptor())
});
})
.await;
let header = HeaderName::from_static("content-length");
let value = HeaderValue::from_static("0");
@ -197,7 +201,7 @@ async fn test_h2_headers() {
future::ok::<_, ()>(config.body(data.clone()))
})
.rustls(ssl_acceptor())
});
}).await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -235,7 +239,8 @@ async fn test_h2_body2() {
HttpService::build()
.h2(|_| future::ok::<_, ()>(Response::Ok().body(STR)))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -251,7 +256,8 @@ async fn test_h2_head_empty() {
HttpService::build()
.finish(|_| ok::<_, ()>(Response::Ok().body(STR)))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.shead("/").send().await.unwrap();
assert!(response.status().is_success());
@ -278,7 +284,8 @@ async fn test_h2_head_binary() {
ok::<_, ()>(Response::Ok().content_length(STR.len() as u64).body(STR))
})
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.shead("/").send().await.unwrap();
assert!(response.status().is_success());
@ -302,7 +309,8 @@ async fn test_h2_head_binary2() {
HttpService::build()
.h2(|_| ok::<_, ()>(Response::Ok().body(STR)))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.shead("/").send().await.unwrap();
assert!(response.status().is_success());
@ -327,7 +335,8 @@ async fn test_h2_body_length() {
)
})
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -350,7 +359,8 @@ async fn test_h2_body_chunked_explicit() {
)
})
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert!(response.status().is_success());
@ -378,7 +388,8 @@ async fn test_h2_response_http_error_handling() {
}))
}))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert_eq!(response.status(), http::StatusCode::INTERNAL_SERVER_ERROR);
@ -394,7 +405,8 @@ async fn test_h2_service_error() {
HttpService::build()
.h2(|_| err::<Response, Error>(error::ErrorBadRequest("error")))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert_eq!(response.status(), http::StatusCode::BAD_REQUEST);
@ -410,7 +422,8 @@ async fn test_h1_service_error() {
HttpService::build()
.h1(|_| err::<Response, Error>(error::ErrorBadRequest("error")))
.rustls(ssl_acceptor())
});
})
.await;
let response = srv.sget("/").send().await.unwrap();
assert_eq!(response.status(), http::StatusCode::BAD_REQUEST);

View File

@ -27,7 +27,8 @@ async fn test_h1() {
future::ok::<_, ()>(Response::Ok().finish())
})
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
@ -46,7 +47,8 @@ async fn test_h1_2() {
future::ok::<_, ()>(Response::Ok().finish())
})
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
@ -65,7 +67,8 @@ async fn test_expect_continue() {
}))
.finish(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test HTTP/1.1\r\nexpect: 100-continue\r\n\r\n");
@ -95,7 +98,8 @@ async fn test_expect_continue_h1() {
}))
.h1(fn_service(|_| future::ok::<_, ()>(Response::Ok().finish())))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test HTTP/1.1\r\nexpect: 100-continue\r\n\r\n");
@ -130,7 +134,8 @@ async fn test_chunked_payload() {
})
}))
.tcp()
});
})
.await;
let returned_size = {
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
@ -172,7 +177,8 @@ async fn test_slow_request() {
.client_timeout(100)
.finish(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP/1.1\r\n");
@ -187,7 +193,8 @@ async fn test_http1_malformed_request() {
HttpService::build()
.h1(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP1.1\r\n");
@ -202,7 +209,8 @@ async fn test_http1_keepalive() {
HttpService::build()
.h1(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP/1.1\r\n\r\n");
@ -223,7 +231,8 @@ async fn test_http1_keepalive_timeout() {
.keep_alive(1)
.h1(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP/1.1\r\n\r\n");
@ -243,7 +252,8 @@ async fn test_http1_keepalive_close() {
HttpService::build()
.h1(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ =
@ -263,7 +273,8 @@ async fn test_http10_keepalive_default_close() {
HttpService::build()
.h1(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP/1.0\r\n\r\n");
@ -282,7 +293,8 @@ async fn test_http10_keepalive() {
HttpService::build()
.h1(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream
@ -309,7 +321,8 @@ async fn test_http1_keepalive_disabled() {
.keep_alive(KeepAlive::Disabled)
.h1(|_| future::ok::<_, ()>(Response::Ok().finish()))
.tcp()
});
})
.await;
let mut stream = net::TcpStream::connect(srv.addr()).unwrap();
let _ = stream.write_all(b"GET /test/tests/test HTTP/1.1\r\n\r\n");
@ -344,7 +357,8 @@ async fn test_content_length() {
future::ok::<_, ()>(Response::new(statuses[indx]))
})
.tcp()
});
})
.await;
let header = HeaderName::from_static("content-length");
let value = HeaderValue::from_static("0");
@ -397,7 +411,7 @@ async fn test_h1_headers() {
}
future::ok::<_, ()>(builder.body(data.clone()))
}).tcp()
});
}).await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
@ -435,7 +449,8 @@ async fn test_h1_body() {
HttpService::build()
.h1(|_| ok::<_, ()>(Response::Ok().body(STR)))
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
@ -451,7 +466,8 @@ async fn test_h1_head_empty() {
HttpService::build()
.h1(|_| ok::<_, ()>(Response::Ok().body(STR)))
.tcp()
});
})
.await;
let response = srv.head("/").send().await.unwrap();
assert!(response.status().is_success());
@ -477,7 +493,8 @@ async fn test_h1_head_binary() {
ok::<_, ()>(Response::Ok().content_length(STR.len() as u64).body(STR))
})
.tcp()
});
})
.await;
let response = srv.head("/").send().await.unwrap();
assert!(response.status().is_success());
@ -501,7 +518,8 @@ async fn test_h1_head_binary2() {
HttpService::build()
.h1(|_| ok::<_, ()>(Response::Ok().body(STR)))
.tcp()
});
})
.await;
let response = srv.head("/").send().await.unwrap();
assert!(response.status().is_success());
@ -526,7 +544,8 @@ async fn test_h1_body_length() {
)
})
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
@ -549,7 +568,8 @@ async fn test_h1_body_chunked_explicit() {
)
})
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
@ -579,7 +599,8 @@ async fn test_h1_body_chunked_implicit() {
ok::<_, ()>(Response::Ok().streaming(body))
})
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());
@ -611,7 +632,8 @@ async fn test_h1_response_http_error_handling() {
)
}))
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert_eq!(response.status(), http::StatusCode::INTERNAL_SERVER_ERROR);
@ -627,7 +649,8 @@ async fn test_h1_service_error() {
HttpService::build()
.h1(|_| future::err::<Response, Error>(error::ErrorBadRequest("error")))
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert_eq!(response.status(), http::StatusCode::BAD_REQUEST);
@ -647,7 +670,8 @@ async fn test_h1_on_connect() {
future::ok::<_, ()>(Response::Ok().finish())
})
.tcp()
});
})
.await;
let response = srv.get("/").send().await.unwrap();
assert!(response.status().is_success());

View File

@ -93,7 +93,8 @@ async fn test_simple() {
.finish(|_| future::ok::<_, ()>(Response::NotFound()))
.tcp()
}
});
})
.await;
// client service
let mut framed = srv.ws().await.unwrap();

View File

@ -1,5 +1,13 @@
# Changes
## [Unreleased] - 2020-xx-xx
* Update the `time` dependency to 0.2.5
## [0.2.1] - 2020-01-10
* Fix panic with already borrowed: BorrowMutError #1263
## [0.2.0] - 2019-12-20
* Use actix-web 2.0

View File

@ -1,6 +1,6 @@
[package]
name = "actix-identity"
version = "0.2.0"
version = "0.2.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Identity service for actix web framework."
readme = "README.md"
@ -10,21 +10,20 @@ repository = "https://github.com/actix/actix-web.git"
documentation = "https://docs.rs/actix-identity/"
license = "MIT/Apache-2.0"
edition = "2018"
workspace = ".."
[lib]
name = "actix_identity"
path = "src/lib.rs"
[dependencies]
actix-web = { version = "2.0.0-rc", default-features = false, features = ["secure-cookies"] }
actix-service = "1.0.0"
actix-web = { version = "2.0.0", default-features = false, features = ["secure-cookies"] }
actix-service = "1.0.5"
futures = "0.3.1"
serde = "1.0"
serde_json = "1.0"
time = "0.1.42"
time = { version = "0.2.5", default-features = false, features = ["std"] }
[dev-dependencies]
actix-rt = "1.0.0"
actix-http = "1.0.1"
bytes = "0.5.3"
bytes = "0.5.4"

View File

@ -1,5 +1,7 @@
# Identity service for actix web framework [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-identity)](https://crates.io/crates/actix-identity) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
**This crate moved to https://github.com/actix/actix-extras.**
## Documentation & community resources
* [User Guide](https://actix.rs/docs/)

View File

@ -251,6 +251,15 @@ pub struct IdentityServiceMiddleware<S, T> {
service: Rc<RefCell<S>>,
}
impl<S, T> Clone for IdentityServiceMiddleware<S, T> {
fn clone(&self) -> Self {
Self {
backend: self.backend.clone(),
service: self.service.clone(),
}
}
}
impl<S, T, B> Service for IdentityServiceMiddleware<S, T>
where
B: 'static,
@ -279,7 +288,9 @@ where
req.extensions_mut()
.insert(IdentityItem { id, changed: false });
let mut res = srv.borrow_mut().call(req).await?;
// https://github.com/actix/actix-web/issues/1263
let fut = { srv.borrow_mut().call(req) };
let mut res = fut.await?;
let id = res.request().extensions_mut().remove::<IdentityItem>();
if let Some(id) = id {
@ -294,7 +305,7 @@ where
Err(err) => Ok(req.error_response(err)),
}
}
.boxed_local()
.boxed_local()
}
}
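
The change above narrows the `RefCell` borrow: the call future is created inside its own block so the mutable borrow is released before the `.await`, which is what resolves the `BorrowMutError` panic from #1263. A minimal standalone sketch of the pattern:

use std::cell::RefCell;
use std::rc::Rc;

async fn call_without_holding_borrow(counter: Rc<RefCell<u32>>) -> u32 {
    // borrow only long enough to build the future; the guard is dropped at
    // the end of this block, before the task suspends at the `.await` below
    let fut = {
        let mut guard = counter.borrow_mut();
        *guard += 1;
        let snapshot = *guard;
        async move { snapshot } // stand-in for `service.call(req)`
    };
    fut.await
}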
@ -417,14 +428,14 @@ impl CookieIdentityInner {
let now = SystemTime::now();
if let Some(visit_deadline) = self.visit_deadline {
if now.duration_since(value.visit_timestamp?).ok()?
> visit_deadline.to_std().ok()?
> visit_deadline
{
return None;
}
}
if let Some(login_deadline) = self.login_deadline {
if now.duration_since(value.login_timestamp?).ok()?
> login_deadline.to_std().ok()?
> login_deadline
{
return None;
}
@ -606,9 +617,10 @@ mod tests {
use std::borrow::Borrow;
use super::*;
use actix_service::into_service;
use actix_web::http::StatusCode;
use actix_web::test::{self, TestRequest};
use actix_web::{web, App, Error, HttpResponse};
use actix_web::{error, web, App, Error, HttpResponse};
const COOKIE_KEY_MASTER: [u8; 32] = [0; 32];
const COOKIE_NAME: &'static str = "actix_auth";
@ -843,7 +855,7 @@ mod tests {
let cv: CookieValue = serde_json::from_str(cookie.value()).unwrap();
assert_eq!(cv.identity, identity);
let now = SystemTime::now();
let t30sec_ago = now - Duration::seconds(30).to_std().unwrap();
let t30sec_ago = now - Duration::seconds(30);
match login_timestamp {
LoginTimestampCheck::NoTimestamp => assert_eq!(cv.login_timestamp, None),
LoginTimestampCheck::NewTimestamp => assert!(
@ -985,7 +997,7 @@ mod tests {
create_identity_server(|c| c.login_deadline(Duration::days(90))).await;
let cookie = login_cookie(
COOKIE_LOGIN,
Some(SystemTime::now() - Duration::days(180).to_std().unwrap()),
Some(SystemTime::now() - Duration::days(180)),
None,
);
let mut resp = test::call_service(
@ -1011,7 +1023,7 @@ mod tests {
let cookie = login_cookie(
COOKIE_LOGIN,
None,
Some(SystemTime::now() - Duration::days(180).to_std().unwrap()),
Some(SystemTime::now() - Duration::days(180)),
);
let mut resp = test::call_service(
&mut srv,
@ -1045,6 +1057,7 @@ mod tests {
assert_logged_in(resp, Some(COOKIE_LOGIN)).await;
}
// https://github.com/actix/actix-web/issues/1263
#[actix_rt::test]
async fn test_identity_cookie_updated_on_visit_deadline() {
let mut srv = create_identity_server(|c| {
@ -1052,7 +1065,7 @@ mod tests {
.login_deadline(Duration::days(90))
})
.await;
let timestamp = SystemTime::now() - Duration::days(1).to_std().unwrap();
let timestamp = SystemTime::now() - Duration::days(1);
let cookie = login_cookie(COOKIE_LOGIN, Some(timestamp), Some(timestamp));
let mut resp = test::call_service(
&mut srv,
@ -1069,4 +1082,47 @@ mod tests {
);
assert_logged_in(resp, Some(COOKIE_LOGIN)).await;
}
#[actix_rt::test]
async fn test_borrowed_mut_error() {
use futures::future::{lazy, ok, Ready};
struct Ident;
impl IdentityPolicy for Ident {
type Future = Ready<Result<Option<String>, Error>>;
type ResponseFuture = Ready<Result<(), Error>>;
fn from_request(&self, _: &mut ServiceRequest) -> Self::Future {
ok(Some("test".to_string()))
}
fn to_response<B>(
&self,
_: Option<String>,
_: bool,
_: &mut ServiceResponse<B>,
) -> Self::ResponseFuture {
ok(())
}
}
let mut srv = IdentityServiceMiddleware {
backend: Rc::new(Ident),
service: Rc::new(RefCell::new(into_service(|_: ServiceRequest| {
async move {
actix_rt::time::delay_for(std::time::Duration::from_secs(100)).await;
Err::<ServiceResponse, _>(error::ErrorBadRequest("error"))
}
}))),
};
let mut srv2 = srv.clone();
let req = TestRequest::default().to_srv_request();
actix_rt::spawn(async move {
let _ = srv2.call(req).await;
});
actix_rt::time::delay_for(std::time::Duration::from_millis(50)).await;
let _ = lazy(|cx| srv.poll_ready(cx)).await;
}
}

View File

@ -1,5 +1,11 @@
# Changes
## [0.2.1] - 2020-01-xx
* Remove the unused `time` dependency
* Fix missing `std::error::Error` implement for `MultipartError`.
## [0.2.0] - 2019-12-20
* Release
@ -44,4 +50,4 @@
* Split multipart support to separate crate
* Optimize multipart handling #634, #769
* Optimize multipart handling #634, #769

View File

@ -16,8 +16,8 @@ name = "actix_multipart"
path = "src/lib.rs"
[dependencies]
actix-web = { version = "2.0.0-rc", default-features = false }
actix-service = "1.0.0"
actix-web = { version = "3.0.0-alpha.2", default-features = false }
actix-service = "1.0.1"
actix-utils = "1.0.3"
bytes = "0.5.3"
derive_more = "0.99.2"
@ -25,9 +25,8 @@ httparse = "1.3"
futures = "0.3.1"
log = "0.4"
mime = "0.3"
time = "0.1"
twoway = "0.2"
[dev-dependencies]
actix-rt = "1.0.0"
actix-http = "1.0.0"
actix-http = "2.0.0-alpha.3"

View File

@ -33,6 +33,8 @@ pub enum MultipartError {
NotConsumed,
}
impl std::error::Error for MultipartError {}
/// Return `BadRequest` for `MultipartError`
impl ResponseError for MultipartError {
fn status_code(&self) -> StatusCode {

View File

@ -1,5 +1,10 @@
# Changes
## [Unreleased] - 2020-01-xx
* Update the `time` dependency to 0.2.5
* [#1292](https://github.com/actix/actix-web/pull/1292) Long lasting auto-prolonged session
## [0.3.0] - 2019-12-20
* Release

View File

@ -22,14 +22,14 @@ default = ["cookie-session"]
cookie-session = ["actix-web/secure-cookies"]
[dependencies]
actix-web = "2.0.0-rc"
actix-service = "1.0.0"
bytes = "0.5.3"
actix-web = { version = "2.0.0" }
actix-service = "1.0.5"
bytes = "0.5.4"
derive_more = "0.99.2"
futures = "0.3.1"
serde = "1.0"
serde_json = "1.0"
time = "0.1.42"
time = { version = "0.2.5", default-features = false, features = ["std"] }
[dev-dependencies]
actix-rt = "1.0.0"

View File

@ -1,5 +1,7 @@
# Session for actix web framework [![Build Status](https://travis-ci.org/actix/actix-web.svg?branch=master)](https://travis-ci.org/actix/actix-web) [![codecov](https://codecov.io/gh/actix/actix-web/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-web) [![crates.io](https://meritbadge.herokuapp.com/actix-session)](https://crates.io/crates/actix-session) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
**This crate moved to https://github.com/actix/actix-extras.**
## Documentation & community resources
* [User Guide](https://actix.rs/docs/)

View File

@ -27,6 +27,7 @@ use actix_web::{Error, HttpMessage, ResponseError};
use derive_more::{Display, From};
use futures::future::{ok, FutureExt, LocalBoxFuture, Ready};
use serde_json::error::Error as JsonError;
use time::{Duration, OffsetDateTime};
use crate::{Session, SessionStatus};
@ -56,7 +57,8 @@ struct CookieSessionInner {
domain: Option<String>,
secure: bool,
http_only: bool,
max_age: Option<time::Duration>,
max_age: Option<Duration>,
expires_in: Option<Duration>,
same_site: Option<SameSite>,
}
@ -71,6 +73,7 @@ impl CookieSessionInner {
secure: true,
http_only: true,
max_age: None,
expires_in: None,
same_site: None,
}
}
@ -96,6 +99,10 @@ impl CookieSessionInner {
cookie.set_domain(domain.clone());
}
if let Some(expires_in) = self.expires_in {
cookie.set_expires(OffsetDateTime::now() + expires_in);
}
if let Some(max_age) = self.max_age {
cookie.set_max_age(max_age);
}
@ -123,8 +130,8 @@ impl CookieSessionInner {
fn remove_cookie<B>(&self, res: &mut ServiceResponse<B>) -> Result<(), Error> {
let mut cookie = Cookie::named(self.name.clone());
cookie.set_value("");
cookie.set_max_age(time::Duration::seconds(0));
cookie.set_expires(time::now() - time::Duration::days(365));
cookie.set_max_age(Duration::zero());
cookie.set_expires(OffsetDateTime::now() - Duration::days(365));
let val = HeaderValue::from_str(&cookie.to_string())?;
res.headers_mut().append(SET_COOKIE, val);
@ -263,7 +270,7 @@ impl CookieSession {
/// Sets the `max-age` field in the session cookie being built.
pub fn max_age(self, seconds: i64) -> CookieSession {
self.max_age_time(time::Duration::seconds(seconds))
self.max_age_time(Duration::seconds(seconds))
}
/// Sets the `max-age` field in the session cookie being built.
@ -271,6 +278,17 @@ impl CookieSession {
Rc::get_mut(&mut self.0).unwrap().max_age = Some(value);
self
}
/// Sets the `expires` field in the session cookie being built.
pub fn expires_in(self, seconds: i64) -> CookieSession {
self.expires_in_time(Duration::seconds(seconds))
}
/// Sets the `expires` field in the session cookie being built.
pub fn expires_in_time(mut self, value: Duration) -> CookieSession {
Rc::get_mut(&mut self.0).unwrap().expires_in = Some(value);
self
}
}
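
A minimal usage sketch for the new `expires_in` builder method, mirroring the `prolong_expiration` test below (bind address and handler are placeholders):

use actix_session::CookieSession;
use actix_web::{web, App, HttpResponse, HttpServer};

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    HttpServer::new(|| {
        App::new()
            // refresh the cookie's `Expires` attribute to one minute from now
            // on every response, even when the session data is unchanged
            .wrap(CookieSession::signed(&[0; 32]).secure(false).expires_in(60))
            .service(web::resource("/").to(|| HttpResponse::Ok()))
    })
    .bind("127.0.0.1:8080")?
    .run()
    .await
}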
impl<S, B: 'static> Transform<S> for CookieSession
@ -323,6 +341,7 @@ where
fn call(&mut self, mut req: ServiceRequest) -> Self::Future {
let inner = self.inner.clone();
let (is_new, state) = self.inner.load(&req);
let prolong_expiration = self.inner.expires_in.is_some();
Session::set_session(state.into_iter(), &mut req);
let fut = self.service.call(req);
@ -334,6 +353,9 @@ where
| (SessionStatus::Renewed, Some(state)) => {
res.checked_expr(|res| inner.set_cookie(res, state))
}
(SessionStatus::Unchanged, Some(state)) if prolong_expiration => {
res.checked_expr(|res| inner.set_cookie(res, state))
}
(SessionStatus::Unchanged, _) =>
// set a new session cookie upon first request (new client)
{
@ -354,7 +376,7 @@ where
}
})
}
.boxed_local()
.boxed_local()
}
}
@ -477,4 +499,47 @@ mod tests {
let body = test::read_response(&mut app, request).await;
assert_eq!(body, Bytes::from_static(b"counter: 100"));
}
#[actix_rt::test]
async fn prolong_expiration() {
let mut app = test::init_service(
App::new()
.wrap(CookieSession::signed(&[0; 32]).secure(false).expires_in(60))
.service(web::resource("/").to(|ses: Session| {
async move {
let _ = ses.set("counter", 100);
"test"
}
}))
.service(
web::resource("/test/")
.to(|| async move { "no-changes-in-session" }),
),
)
.await;
let request = test::TestRequest::get().to_request();
let response = app.call(request).await.unwrap();
let expires_1 = response
.response()
.cookies()
.find(|c| c.name() == "actix-session")
.expect("Cookie is set")
.expires()
.expect("Expiration is set");
actix_rt::time::delay_for(std::time::Duration::from_secs(1)).await;
let request = test::TestRequest::with_uri("/test/").to_request();
let response = app.call(request).await.unwrap();
let expires_2 = response
.response()
.cookies()
.find(|c| c.name() == "actix-session")
.expect("Cookie is set")
.expires()
.expect("Expiration is set");
assert!(expires_2 - expires_1 >= Duration::seconds(1));
}
}

View File

@ -37,7 +37,7 @@
//! )
//! .service(web::resource("/").to(|| HttpResponse::Ok())))
//! .bind("127.0.0.1:59880")?
//! .start()
//! .run()
//! .await
//! }
//! ```

View File

@ -1,5 +1,11 @@
# Changes
## [3.0.0-alpha.1] - 2020-05-08
* Update the actix-web dependency to 3.0.0-alpha.1
* Update the actix dependency to 0.10.0-alpha.2
* Update the actix-http dependency to 2.0.0-alpha.3
## [2.0.0] - 2019-12-20
* Release

View File

@ -1,6 +1,6 @@
[package]
name = "actix-web-actors"
version = "2.0.0"
version = "3.0.0-alpha.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix actors support for actix web framework."
readme = "README.md"
@ -16,9 +16,9 @@ name = "actix_web_actors"
path = "src/lib.rs"
[dependencies]
actix = "0.9.0"
actix-web = "2.0.0-rc"
actix-http = "1.0.1"
actix = "0.10.0-alpha.2"
actix-web = "3.0.0-alpha.2"
actix-http = "2.0.0-alpha.3"
actix-codec = "0.2.0"
bytes = "0.5.2"
futures = "0.3.1"
@ -26,4 +26,4 @@ pin-project = "0.4.6"
[dev-dependencies]
actix-rt = "1.0.0"
env_logger = "0.6"
env_logger = "0.7"

View File

@ -174,7 +174,7 @@ where
// frames
if let Some(data) = self.fut.ctx().stream.pop_front() {
Poll::Ready(data.map(|b| Ok(b)))
Poll::Ready(data.map(Ok))
} else if self.fut.alive() {
Poll::Pending
} else {

View File

@ -1,5 +1,13 @@
# Changes
## [0.2.1] - 2020-02-25
* Add `#[allow(missing_docs)]` attribute to generated structs [#1368]
* Allow the handler function to be named as `config` [#1290]
[#1368]: https://github.com/actix/actix-web/issues/1368
[#1290]: https://github.com/actix/actix-web/issues/1290
## [0.2.0] - 2019-12-13
* Generate code for actix-web 2.0

View File

@ -1,6 +1,6 @@
[package]
name = "actix-web-codegen"
version = "0.2.0"
version = "0.2.1"
description = "Actix web proc macros"
readme = "README.md"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
@ -17,6 +17,6 @@ syn = { version = "^1", features = ["full", "parsing"] }
proc-macro2 = "^1"
[dev-dependencies]
actix-rt = { version = "1.0.0" }
actix-web = { version = "2.0.0-rc" }
futures = { version = "0.3.1" }
actix-rt = "1.0.0"
actix-web = "3.0.0-alpha.2"
futures = "0.3.1"

View File

@ -21,6 +21,7 @@
//!
//! - `"path"` - Raw literal string with path for which to register handle. Mandatory.
//! - `guard="function_name"` - Registers function as guard using `actix_web::guard::fn_guard`
//! - `wrap="Middleware"` - Registers a resource middleware.
//!
//! ## Notes
//!
@ -45,7 +46,6 @@ extern crate proc_macro;
mod route;
use proc_macro::TokenStream;
use syn::parse_macro_input;
/// Creates route handler with `GET` method guard.
///
@ -55,14 +55,10 @@ use syn::parse_macro_input;
///
/// - `"path"` - Raw literal string with path for which to register handler. Mandatory.
/// - `guard="function_name"` - Registers function as guard using `actix_web::guard::fn_guard`
/// - `wrap="Middleware"` - Registers a resource middleware.
#[proc_macro_attribute]
pub fn get(args: TokenStream, input: TokenStream) -> TokenStream {
let args = parse_macro_input!(args as syn::AttributeArgs);
let gen = match route::Route::new(args, input, route::GuardType::Get) {
Ok(gen) => gen,
Err(err) => return err.to_compile_error().into(),
};
gen.generate()
route::generate(args, input, route::GuardType::Get)
}
/// Creates route handler with `POST` method guard.
@ -72,12 +68,7 @@ pub fn get(args: TokenStream, input: TokenStream) -> TokenStream {
/// Attributes are the same as in [get](attr.get.html)
#[proc_macro_attribute]
pub fn post(args: TokenStream, input: TokenStream) -> TokenStream {
let args = parse_macro_input!(args as syn::AttributeArgs);
let gen = match route::Route::new(args, input, route::GuardType::Post) {
Ok(gen) => gen,
Err(err) => return err.to_compile_error().into(),
};
gen.generate()
route::generate(args, input, route::GuardType::Post)
}
/// Creates route handler with `PUT` method guard.
@ -87,12 +78,7 @@ pub fn post(args: TokenStream, input: TokenStream) -> TokenStream {
/// Attributes are the same as in [get](attr.get.html)
#[proc_macro_attribute]
pub fn put(args: TokenStream, input: TokenStream) -> TokenStream {
let args = parse_macro_input!(args as syn::AttributeArgs);
let gen = match route::Route::new(args, input, route::GuardType::Put) {
Ok(gen) => gen,
Err(err) => return err.to_compile_error().into(),
};
gen.generate()
route::generate(args, input, route::GuardType::Put)
}
/// Creates route handler with `DELETE` method guard.
@ -102,12 +88,7 @@ pub fn put(args: TokenStream, input: TokenStream) -> TokenStream {
/// Attributes are the same as in [get](attr.get.html)
#[proc_macro_attribute]
pub fn delete(args: TokenStream, input: TokenStream) -> TokenStream {
let args = parse_macro_input!(args as syn::AttributeArgs);
let gen = match route::Route::new(args, input, route::GuardType::Delete) {
Ok(gen) => gen,
Err(err) => return err.to_compile_error().into(),
};
gen.generate()
route::generate(args, input, route::GuardType::Delete)
}
/// Creates route handler with `HEAD` method guard.
@@ -117,12 +98,7 @@ pub fn delete(args: TokenStream, input: TokenStream) -> TokenStream {
/// Attributes are the same as in [head](attr.head.html)
#[proc_macro_attribute]
pub fn head(args: TokenStream, input: TokenStream) -> TokenStream {
let args = parse_macro_input!(args as syn::AttributeArgs);
let gen = match route::Route::new(args, input, route::GuardType::Head) {
Ok(gen) => gen,
Err(err) => return err.to_compile_error().into(),
};
gen.generate()
route::generate(args, input, route::GuardType::Head)
}
/// Creates route handler with `CONNECT` method guard.
@@ -132,12 +108,7 @@ pub fn head(args: TokenStream, input: TokenStream) -> TokenStream {
/// Attributes are the same as in [connect](attr.connect.html)
#[proc_macro_attribute]
pub fn connect(args: TokenStream, input: TokenStream) -> TokenStream {
let args = parse_macro_input!(args as syn::AttributeArgs);
let gen = match route::Route::new(args, input, route::GuardType::Connect) {
Ok(gen) => gen,
Err(err) => return err.to_compile_error().into(),
};
gen.generate()
route::generate(args, input, route::GuardType::Connect)
}
/// Creates route handler with `OPTIONS` method guard.
@@ -147,12 +118,7 @@ pub fn connect(args: TokenStream, input: TokenStream) -> TokenStream {
/// Attributes are the same as in [options](attr.options.html)
#[proc_macro_attribute]
pub fn options(args: TokenStream, input: TokenStream) -> TokenStream {
let args = parse_macro_input!(args as syn::AttributeArgs);
let gen = match route::Route::new(args, input, route::GuardType::Options) {
Ok(gen) => gen,
Err(err) => return err.to_compile_error().into(),
};
gen.generate()
route::generate(args, input, route::GuardType::Options)
}
/// Creates route handler with `TRACE` method guard.
@@ -162,12 +128,7 @@ pub fn options(args: TokenStream, input: TokenStream) -> TokenStream {
/// Attributes are the same as in [trace](attr.trace.html)
#[proc_macro_attribute]
pub fn trace(args: TokenStream, input: TokenStream) -> TokenStream {
let args = parse_macro_input!(args as syn::AttributeArgs);
let gen = match route::Route::new(args, input, route::GuardType::Trace) {
Ok(gen) => gen,
Err(err) => return err.to_compile_error().into(),
};
gen.generate()
route::generate(args, input, route::GuardType::Trace)
}
/// Creates route handler with `PATCH` method guard.
@@ -177,10 +138,5 @@ pub fn trace(args: TokenStream, input: TokenStream) -> TokenStream {
/// Attributes are the same as in [patch](attr.patch.html)
#[proc_macro_attribute]
pub fn patch(args: TokenStream, input: TokenStream) -> TokenStream {
let args = parse_macro_input!(args as syn::AttributeArgs);
let gen = match route::Route::new(args, input, route::GuardType::Patch) {
Ok(gen) => gen,
Err(err) => return err.to_compile_error().into(),
};
gen.generate()
route::generate(args, input, route::GuardType::Patch)
}

@@ -2,8 +2,8 @@ extern crate proc_macro;
use proc_macro::TokenStream;
use proc_macro2::{Span, TokenStream as TokenStream2};
use quote::{quote, ToTokens, TokenStreamExt};
use syn::{AttributeArgs, Ident, NestedMeta};
use quote::{format_ident, quote, ToTokens, TokenStreamExt};
use syn::{AttributeArgs, Ident, NestedMeta, parse_macro_input};
enum ResourceType {
Async,
@@ -12,11 +12,7 @@ enum ResourceType {
impl ToTokens for ResourceType {
fn to_tokens(&self, stream: &mut TokenStream2) {
let ident = match self {
ResourceType::Async => "to",
ResourceType::Sync => "to",
};
let ident = Ident::new(ident, Span::call_site());
let ident = format_ident!("to");
stream.append(ident);
}
}
@@ -52,8 +48,7 @@ impl GuardType {
impl ToTokens for GuardType {
fn to_tokens(&self, stream: &mut TokenStream2) {
let ident = self.as_str();
let ident = Ident::new(ident, Span::call_site());
let ident = Ident::new(self.as_str(), Span::call_site());
stream.append(ident);
}
}
@@ -61,12 +56,14 @@ impl ToTokens for GuardType {
struct Args {
path: syn::LitStr,
guards: Vec<Ident>,
wrappers: Vec<syn::Type>,
}
impl Args {
fn new(args: AttributeArgs) -> syn::Result<Self> {
let mut path = None;
let mut guards = Vec::new();
let mut wrappers = Vec::new();
for arg in args {
match arg {
NestedMeta::Lit(syn::Lit::Str(lit)) => match path {
@@ -90,21 +87,31 @@ impl Args {
"Attribute guard expects literal string!",
));
}
} else if nv.path.is_ident("wrap") {
if let syn::Lit::Str(lit) = nv.lit {
wrappers.push(lit.parse()?);
} else {
return Err(syn::Error::new_spanned(
nv.lit,
"Attribute wrap expects type",
));
}
} else {
return Err(syn::Error::new_spanned(
nv.path,
"Unknown attribute key is specified. Allowed: guard",
"Unknown attribute key is specified. Allowed: guard and wrap",
));
}
}
arg => {
return Err(syn::Error::new_spanned(arg, "Unknown attribute"));
return Err(syn::Error::new_spanned(arg, "Unknown attribute."));
}
}
}
Ok(Args {
path: path.unwrap(),
guards,
wrappers,
})
}
}
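A note on the `wrap` handling above: `syn::LitStr::parse` re-interprets the quoted attribute value as a `syn::Type`, so `wrap = "..."` expects a type path (typically a unit-struct middleware) rather than an arbitrary expression. A minimal standalone sketch of that parsing step, outside the macro and with an illustrative name:

use proc_macro2::Span;
use quote::quote;
use syn::LitStr;

fn main() {
    // The literal value of `wrap = "..."` is re-parsed into a type.
    let lit = LitStr::new("ChangeStatusCode", Span::call_site());
    let wrapper: syn::Type = lit.parse().expect("wrap expects a type");

    // The generator later splices it into the resource builder as `.wrap(ChangeStatusCode)`.
    let tokens = quote! { .wrap(#wrapper) };
    println!("{}", tokens);
}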
@@ -181,32 +188,49 @@ impl Route {
guard,
})
}
}
pub fn generate(&self) -> TokenStream {
let name = &self.name;
impl ToTokens for Route {
fn to_tokens(&self, output: &mut TokenStream2) {
let Self {
name,
guard,
ast,
args: Args { path, guards, wrappers },
resource_type,
} = self;
let resource_name = name.to_string();
let guard = &self.guard;
let ast = &self.ast;
let path = &self.args.path;
let extra_guards = &self.args.guards;
let resource_type = &self.resource_type;
let stream = quote! {
#[allow(non_camel_case_types)]
#[allow(non_camel_case_types, missing_docs)]
pub struct #name;
impl actix_web::dev::HttpServiceFactory for #name {
fn register(self, config: &mut actix_web::dev::AppService) {
fn register(self, __config: &mut actix_web::dev::AppService) {
#ast
let resource = actix_web::Resource::new(#path)
let __resource = actix_web::Resource::new(#path)
.name(#resource_name)
.guard(actix_web::guard::#guard())
#(.guard(actix_web::guard::fn_guard(#extra_guards)))*
#(.guard(actix_web::guard::fn_guard(#guards)))*
#(.wrap(#wrappers))*
.#resource_type(#name);
actix_web::dev::HttpServiceFactory::register(resource, config)
actix_web::dev::HttpServiceFactory::register(__resource, __config)
}
}
};
stream.into()
output.extend(stream);
}
}
pub(crate) fn generate(
args: TokenStream,
input: TokenStream,
guard: GuardType,
) -> TokenStream {
let args = parse_macro_input!(args as syn::AttributeArgs);
match Route::new(args, input, guard) {
Ok(route) => route.into_token_stream().into(),
Err(err) => err.to_compile_error().into(),
}
}
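Putting the template above together, the expansion for a plain handler such as `#[get("/test")] async fn test_handler() -> impl Responder { ... }` comes out roughly as follows. This is a simplified sketch of the generated code, not its exact output:

#[allow(non_camel_case_types, missing_docs)]
pub struct test_handler;

impl actix_web::dev::HttpServiceFactory for test_handler {
    fn register(self, __config: &mut actix_web::dev::AppService) {
        // The original handler item is pasted back in here by `#ast`.
        async fn test_handler() -> impl actix_web::Responder {
            actix_web::HttpResponse::Ok()
        }

        let __resource = actix_web::Resource::new("/test")
            .name("test_handler")
            .guard(actix_web::guard::Get())
            // Any `guard = "..."` and `wrap = "..."` arguments would add
            // `.guard(actix_web::guard::fn_guard(...))` and `.wrap(...)` calls here.
            .to(test_handler);

        actix_web::dev::HttpServiceFactory::register(__resource, __config)
    }
}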

@@ -1,6 +1,17 @@
use actix_web::{http, test, web::Path, App, HttpResponse, Responder};
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_web::{http, test, web::Path, App, HttpResponse, Responder, Error};
use actix_web::dev::{Service, Transform, ServiceRequest, ServiceResponse};
use actix_web_codegen::{connect, delete, get, head, options, patch, post, put, trace};
use futures::{future, Future};
use actix_web::http::header::{HeaderName, HeaderValue};
// Make sure that we can name function as 'config'
#[get("/config")]
async fn config() -> impl Responder {
HttpResponse::Ok()
}
#[get("/test")]
async fn test_handler() -> impl Responder {
@@ -67,6 +78,65 @@ async fn get_param_test(_: Path<String>) -> impl Responder {
HttpResponse::Ok()
}
pub struct ChangeStatusCode;
impl<S, B> Transform<S> for ChangeStatusCode
where
S: Service<Request = ServiceRequest, Response = ServiceResponse<B>, Error = Error>,
S::Future: 'static,
B: 'static,
{
type Request = ServiceRequest;
type Response = ServiceResponse<B>;
type Error = Error;
type InitError = ();
type Transform = ChangeStatusCodeMiddleware<S>;
type Future = future::Ready<Result<Self::Transform, Self::InitError>>;
fn new_transform(&self, service: S) -> Self::Future {
future::ok(ChangeStatusCodeMiddleware { service })
}
}
pub struct ChangeStatusCodeMiddleware<S> {
service: S,
}
impl<S, B> Service for ChangeStatusCodeMiddleware<S>
where
S: Service<Request = ServiceRequest, Response = ServiceResponse<B>, Error = Error>,
S::Future: 'static,
B: 'static,
{
type Request = ServiceRequest;
type Response = ServiceResponse<B>;
type Error = Error;
type Future = Pin<Box<dyn Future<Output = Result<Self::Response, Self::Error>>>>;
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(cx)
}
fn call(&mut self, req: ServiceRequest) -> Self::Future {
let fut = self.service.call(req);
Box::pin(async move {
let mut res = fut.await?;
let headers = res.headers_mut();
let header_name = HeaderName::from_lowercase(b"custom-header").unwrap();
let header_value = HeaderValue::from_str("hello").unwrap();
headers.insert(header_name, header_value);
Ok(res)
})
}
}
#[get("/test/wrap", wrap = "ChangeStatusCode")]
async fn get_wrap(_: Path<String>) -> impl Responder {
HttpResponse::Ok()
}
#[actix_rt::test]
async fn test_params() {
let srv = test::start(|| {
@@ -149,3 +219,15 @@ async fn test_auto_async() {
let response = request.send().await.unwrap();
assert!(response.status().is_success());
}
#[actix_rt::test]
async fn test_wrap() {
let srv = test::start(|| {
App::new()
.service(get_wrap)
});
let request = srv.request(http::Method::GET, srv.url("/test/wrap"));
let response = request.send().await.unwrap();
assert!(response.headers().contains_key("custom-header"));
}

Some files were not shown because too many files have changed in this diff.