mirror of https://github.com/fafhrd91/actix-net synced 2025-08-12 01:51:36 +02:00

Compare commits


362 Commits

Author SHA1 Message Date
Rob Ede
a95afe2800 prepare router release 0.2.6 2021-01-09 14:18:20 +00:00
Rob Ede
f751cf5acb use convert err on forward_ready! (#246) 2021-01-09 14:13:16 +00:00
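
For context, `forward_ready!` generates a `poll_ready` implementation that delegates to an inner service and converts its error with `Into`, which is what #246 adjusts. A minimal sketch of a wrapper service using it, written against the actix-service 2.0 API (the `Logging` wrapper itself is illustrative, not from this repo):

```rust
use actix_service::{forward_ready, Service};

/// Illustrative wrapper that delegates to an inner service.
struct Logging<S> {
    inner: S,
}

impl<S, Req> Service<Req> for Logging<S>
where
    S: Service<Req>,
{
    type Response = S::Response;
    type Error = S::Error;
    type Future = S::Future;

    // Expands to a `poll_ready` that forwards to `self.inner`,
    // mapping the inner error via `Into::into`.
    forward_ready!(inner);

    fn call(&self, req: Req) -> Self::Future {
        // e.g. record metrics or log here before delegating
        self.inner.call(req)
    }
}
```
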
fakeshadow
a1982bdbad add actix-rt::task (#245) 2021-01-03 18:16:57 +00:00
Rob Ede
147c4f4f2c test bytestring with ahash 2021-01-03 04:42:08 +00:00
Rob Ede
5285656bdc prepare next beta releases 2021-01-03 04:39:37 +00:00
Rob Ede
296294061f update readme 2020-12-31 02:52:55 +00:00
Rob Ede
93865de848 move router to actix-router 2020-12-31 02:29:27 +00:00
Rob Ede
6bcf6d8160 use bytestring crate name as dir name 2020-12-31 02:21:50 +00:00
Rob Ede
14ff379150 prepare bytestring release 1.0.0 (#243) 2020-12-31 02:20:49 +00:00
fakeshadow
647817ef14 tokio 1.0 and mio 0.7 (#204) 2020-12-30 22:11:50 +00:00
fakeshadow
b5eefb4d42 merge actix-testing into actix-server (#242) 2020-12-29 21:20:24 +00:00
fakeshadow
03eb96d6d4 fix actix-tls tests (#241) 2020-12-29 11:36:17 +00:00
Rob Ede
0934078947 prepare tls beta release 2020-12-29 01:04:21 +00:00
Rob Ede
5759c9e144 merge -connect and -tls and upgrade to rt v2 (#238) 2020-12-29 00:38:41 +00:00
Rob Ede
3c6de3a81b use correct service version for tracing 2020-12-29 00:08:59 +00:00
Rob Ede
ef83647ac9 prepare testing beta release 2020-12-28 23:54:21 +00:00
Rob Ede
98a17081b8 prepare server beta release 2020-12-28 23:50:00 +00:00
fakeshadow
b7202db8fd update actix-server and actix-testing to tokio 1.0 (#239) 2020-12-28 23:44:53 +00:00
Rob Ede
a09f9abfcb prepare utils release 3.0.0-beta.1 2020-12-28 03:32:28 +00:00
Rob Ede
e4a44b77e6 prepare codec release 0.4.0-beta.1 2020-12-28 03:24:43 +00:00
fakeshadow
2ee8f45f5d update actix-codec and actix-utils to tokio 1.0 (#237) 2020-12-28 03:16:37 +00:00
Rob Ede
f48e3f4cb0 prepare release for rt and service 2020-12-28 01:58:31 +00:00
Rob Ede
3d3bd60368 fix rt override 2020-12-28 01:53:11 +00:00
Rob Ede
d684128831 fix rt override 2020-12-28 01:48:19 +00:00
fakeshadow
0c12930796 update to tokio 1.0 for actix-rt (#236) 2020-12-28 01:40:22 +00:00
Rob Ede
ba44ea7d0b remove futures-util from service deps (#235) 2020-12-27 18:24:57 +00:00
Rob Ede
8a58a341a4 service improvements (#233) 2020-12-27 14:15:42 +00:00
Rob Ede
33c9aa6988 bump msrv to 1.46 2020-12-27 04:36:08 +00:00
Rob Ede
3ab8c3eb69 service trait takes request type parameter (#232) 2020-12-27 04:28:00 +00:00
fakeshadow
518bf3f6a6 remove RUNNING Q PENDING thread locals from actix-rt (#207) 2020-12-26 23:26:02 +00:00
fakeshadow
43ce25cda1 Remove unused mods in actix-utils (#229) 2020-12-26 21:27:59 +00:00
Yuki Okushi
4e4122b702 Disable PR comment from codecov 2020-12-17 21:42:21 +09:00
Aravinth Manivannan
b296d0f254 Intradoc links conversion (#227)
* intra doc conversion

* rm trailing blank comment
2020-12-14 08:22:30 +00:00
Juan Aguilar
02a902068f Refactor LocalWaker (#224) 2020-12-13 19:26:57 +00:00
fakeshadow
049795662f remove ServerMessage type. remove one unused InternalServiceFactory impl (#225) 2020-12-13 00:46:32 +00:00
Rob Ede
4e43216b99 standardise compiler lints across all crates (#226) 2020-12-12 23:24:00 +00:00
Rob Ede
93889776c4 prevent double registration of sockets when backpressure is resolved (#223) 2020-12-12 17:19:20 +00:00
Yuki Okushi
ab496a71b5 Fix release date 2020-12-03 08:59:59 +09:00
Yuki Okushi
76d956e25c macros: Add actix-reexport feature (#218) 2020-12-03 08:59:13 +09:00
Ivan Babrou
89e56cf661 Notify about paused accept loop (#215) 2020-11-29 15:30:13 +00:00
Rob Ede
8aca8d4d07 fix clippy warnings (#214)
and make my spelling checker happy
2020-11-25 01:41:14 +00:00
fakeshadow
e0dd2a3d76 remove actix-threadpool re-export from actix-rt (#212) 2020-11-24 17:03:09 +00:00
Rob Ede
59e976aaca address clippy error (#213) 2020-11-24 16:35:47 +00:00
Zura Benashvili
4cc1c87724 docs(transform): remove extra generic parameter (#211) 2020-11-20 22:45:57 +00:00
Yuki Okushi
ca39917d2c Update CoC contact information 2020-10-31 12:08:06 +09:00
ghizzo01
704af672b9 Bump pin-project to 1.0 (#202) 2020-10-25 19:42:40 +09:00
Rob Ede
242bef269f delete ioframe removed package readme
closes #199
2020-09-22 12:29:07 +01:00
Rob Ede
6c65e2a79f prepare router 0.2.5 release (#198) 2020-09-21 22:46:59 +01:00
nujz
e5ca271764 actix-router: fix from_hex error (#196) 2020-09-20 18:04:18 +01:00
nujz
98a2197a09 fix doc error (#195) 2020-09-19 23:12:41 +09:00
Rob Ede
fb0aa02b3c move and update server+tls examples (#190) 2020-09-13 10:12:07 +01:00
Rob Ede
681eeb497d prepare server release 1.0.4 (#188) 2020-09-12 15:28:17 +01:00
Igor Aleksanov
3e04b87311 actix-service: Fix broken link in readme (#189) 2020-09-12 15:08:03 +01:00
Rob Ede
77b7826658 prepare tls v2 release (#186) 2020-09-08 18:00:07 +01:00
Igor Aleksanov
b7a9cb7bb4 actix-rt: Make the process of running System in existing Runtime more clear (#173) 2020-09-06 11:01:24 +01:00
Robert Gabriel Jakabosky
88d99ac89c Fix clippy errors. (#187) 2020-09-06 10:41:42 +01:00
Rob Ede
7632f51509 prepare connect v2 stable release (#185) 2020-09-02 22:14:07 +01:00
Rob Ede
d28687d0d7 promote codec/utils out of beta (#184) 2020-08-24 09:18:37 +01:00
Rob Ede
27c6be9881 remove unused type parameter from Framed::replace_codec (#183) 2020-08-20 00:30:26 +01:00
Rob Ede
119dc39f5b prepare codec and utils betas (#182) 2020-08-19 11:00:12 +01:00
Rob Ede
b3010c13e0 solve framed integration with actix-http (#179) 2020-08-18 23:27:37 +01:00
Adrian Wechner
fecdfcd8d4 assert workers greater than zero (#167) 2020-08-18 16:44:22 +01:00
Yuki Okushi
578a560853 connect,tls: Bump up to next alpha versions (#181) 2020-08-17 15:39:17 +01:00
Rob Ede
fb098536ee bump MSRV to 1.42 (#180) 2020-08-17 15:37:57 +01:00
Rob Ede
5d28be9ad6 fix actix-service readme reference (#176) 2020-08-11 12:20:09 +01:00
Rob Ede
a5a6b6704c prepare actix-service 1.0.6 release (#175) 2020-08-09 16:10:58 +01:00
Igor Aleksanov
afb0a3c9fc actix-service: Fix clippy warning in benches (#174) 2020-08-07 17:16:45 +09:00
Miloas
02aaa75591 fix actix-service doc error (#172) 2020-08-06 11:21:51 +01:00
Yuki Okushi
ed4b708c66 Fix CI on MSRV check (#171) 2020-08-05 09:02:41 +09:00
Yuki Okushi
235a76dcd4 GHA: Switch action to the official setup-msys2 (#169) 2020-07-29 08:47:32 +09:00
Matt Kantor
0c5f1da625 Remove garbled doc comment for actix_router::IntoPattern::is_single (#168) 2020-07-29 05:46:53 +09:00
Yuki Okushi
8ace9264b7 Check code style with rustfmt on CI (#164) 2020-07-22 12:32:13 +09:00
Yuki Okushi
0dca1a705a actix-utils: Remove unsound custom Cell as well (#161) 2020-07-22 01:14:32 +01:00
Juan Aguilar
5d6d309e66 Simplify bcodec decode (#162) 2020-07-20 23:09:24 +09:00
Juan Aguilar
8d0bd7ce1c Improve bcodec encode performance (#157) 2020-07-19 22:36:51 +01:00
Sergey "Shnatsel" Davidoff
a67e38b4a0 Remove unsound custom Cell (#158) 2020-07-20 06:05:36 +09:00
Rob Ede
334c98575a Upgrade tokio utils to 0.3 (#138) 2020-07-20 05:44:26 +09:00
Rob Ede
a9b5a7b070 Create PULL_REQUEST_TEMPLATE.md (#159) 2020-07-20 03:01:09 +09:00
Yuki Okushi
61176f6410 Update rustls-related dependencies (#154) 2020-07-14 11:14:06 +01:00
Yuki Okushi
10b4c30a06 Use OR instead of deprecated / in license field (#155) 2020-07-14 11:11:30 +01:00
Yuki Okushi
7f550bcf0f threadpool: Bump up to 0.3.3 (#156) 2020-07-14 11:10:15 +01:00
Yuki Okushi
887f11f787 Merge pull request #153 from actix/tweak-actions
Tweak actions trigger events
2020-07-08 09:04:05 +09:00
Yuki Okushi
e2a6d352b0 Tweak actions trigger events 2020-07-08 08:38:24 +09:00
Yuki Okushi
f6c697a2dd Merge pull request #152 from paolobarbolini/pl-011
Update parking_lot to 0.11
2020-07-04 03:20:08 +09:00
Paolo Barbolini
5ecdfd684a Update parking_lot to 0.11 2020-07-03 17:37:10 +02:00
Yuki Okushi
7140c04c44 Merge pull request #149 from taiki-e/pin-project
Remove uses of pin_project::project attribute
2020-06-07 02:01:08 +09:00
Taiki Endo
9528df4486 Remove uses of pin_project::project attribute
pin-project will deprecate the project attribute due to some unfixable
limitations.

Refs: https://github.com/taiki-e/pin-project/issues/225
2020-06-06 06:42:45 +09:00
Pen Tree
755a8bb9d1 fix codec doc links (#148) 2020-06-02 18:05:39 +01:00
Yuki Okushi
f3cb6efc30 Merge pull request #146 from actix/cache-v2
Update `actions/cache` to v2
2020-05-28 04:59:34 +09:00
Yuki Okushi
87b857705c Update actions/cache to v2 2020-05-28 03:14:01 +09:00
Yuki Okushi
c897c5d3eb Merge pull request #145 from JohnTitor/new-threalpool
threadpool: Bump up to 0.3.2
2020-05-20 15:24:39 +09:00
Yuki Okushi
134e76b8b4 threadpool: Bump up to 0.3.2 2020-05-20 14:19:16 +09:00
Yuki Okushi
f3a401c23b Merge pull request #144 from JohnTitor/codecov-config
Add codecov config
2020-05-20 11:03:31 +09:00
Yuki Okushi
f7e8a912b3 Add codecov config 2020-05-19 14:45:39 +09:00
Yuki Okushi
11a1e11858 Merge pull request #143 from JohnTitor/new-testing
testing: Bump up to 1.0.1
2020-05-19 14:37:54 +09:00
Yuki Okushi
d0b27ee7e6 testing: Bump up to 1.0.1 2020-05-19 14:08:08 +09:00
Yuki Okushi
2d2b0591a2 Merge pull request #142 from JohnTitor/new-server
server: Bump up to 1.0.3
2020-05-19 13:58:39 +09:00
Yuki Okushi
abbc5f715f server: Bump up to 1.0.3 2020-05-19 10:23:17 +09:00
Yuki Okushi
140a6c76e3 Merge pull request #141 from actix/fix-ci
Only check compilation on mingw CI
2020-05-19 09:39:03 +09:00
Yuki Okushi
2395b28c5e Only check compilation on mingw CI
Disabled running tests since linking with OpenSSL is somehow broken.
2020-05-19 09:11:27 +09:00
Yuki Okushi
aad4812ba6 Merge pull request #140 from JohnTitor/replace-net2
Replace deprecated `net2` crate with `socket2`
2020-05-19 08:58:40 +09:00
Yuki Okushi
ac6c78c476 testing: Replace net2 crate with socket2 2020-05-19 08:21:40 +09:00
Yuki Okushi
8218a098e8 server: Replace net2 crate with socket2 2020-05-19 08:17:44 +09:00
Yuki Okushi
49a6f525be Merge pull request #139 from JohnTitor/next-macros
macros: Bump up to 0.1.2
2020-05-19 07:50:46 +09:00
Yuki Okushi
f59ff82395 macros: Bump up to 0.1.2 2020-05-18 15:36:23 +09:00
Yuki Okushi
f7cc62564d Merge pull request #136 from JohnTitor/connect-alpha-3
actix-connect: Bump up to 2.0.0-alpha.3
2020-05-08 01:36:16 +09:00
Yuki Okushi
b125e2bdce actix-connect: Bump up to 2.0.0-alpha.3 2020-05-08 01:07:57 +09:00
Yuki Okushi
a5c185e80e Merge pull request #135 from actix/fix/unresolverd
correct spelling of ConnectError::Unresolved
2020-05-06 14:45:30 +09:00
Rob Ede
523cee0351 correct spelling of ConnectError::Unresolved 2020-05-03 23:14:22 +01:00
Yuki Okushi
343b3c09fc Merge pull request #134 from JohnTitor/new-rt
Bump up `actix-rt` to 1.1.1
2020-04-30 14:34:17 +09:00
Yuki Okushi
8a10580663 Bump up actix-rt to 1.1.1 2020-04-30 03:07:12 +09:00
Yuki Okushi
1b4a117063 Merge pull request #128 from Jonathas-Conceicao/topic/fix_memory_leak
actix-rt: Spawn future to cleanup pending JoinHandles
2020-04-30 02:58:13 +09:00
Yuki Okushi
700997fe48 Merge pull request #133 from actix/macro-compile-testing
add macro compile tests
2020-04-29 15:33:00 +09:00
Rob Ede
4c5568ed70 add trybuild compile tests 2020-04-26 20:11:16 +01:00
Yuki Okushi
7d0cfe1b4d Merge pull request #131 from danpintara/pull-1
actix-macros: Simplify test macros by using original signature
2020-04-23 02:33:52 +09:00
Daniel Pintara
e35c261c9f actix-macros: test: Simplify by using #sig instead of #name(#inputs) #ret 2020-04-22 00:13:32 +07:00
Yuki Okushi
115ef3fcb3 Merge pull request #130 from JohnTitor/dont-clone
Remove unnecessary clone usage
2020-04-20 08:37:10 +09:00
Yuki Okushi
c0482e2532 Remove unnecessary clone usage 2020-04-20 08:02:08 +09:00
Jonathas-Conceicao
6906f25e01 actix-rt: Set threshold size for arbiter's pending futures list
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-16 03:12:05 -03:00
Jonathas-Conceicao
06bca19524 actix-rt: Spawn future to cleanup pending JoinHandles
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-09 20:36:44 -03:00
Yuki Okushi
e9e2185296 Merge pull request #127 from rubdos/test-fixture-integration
Forward actix_rt::test arguments to test function.
2020-04-09 17:45:17 +09:00
Ruben De Smet
aae52a80ab Forward actix_rt::test arguments to test function.
Previously,

```rust
async fn foo(_a: u32) {}
```

would compile to

```rust
fn foo() {/* something */}
```

This patch changes this behaviour to

```rust
fn foo(_a: u32) {/* something */}
```

by simply forwarding the input arguments.

This allows any test fixture library (e.g. `rstest`, cfr.
https://github.com/la10736/rstest/issues/85) to integrate with
actix::test.
2020-04-08 16:48:10 +02:00
Yuki Okushi
65e2e8052e Release actix-rt 1.1.0 (#126)
* Release actix-rt 1.1.0

* Update actix-rt/CHANGES.md
2020-04-08 16:34:07 +09:00
Jonathas-Conceicao
783880bb0a actix-rt: Add Arbiter::is_running helper and fix System::is_set doc
`Arbiter::is_running` can be used to check whether the current event loop is
running, which should also work after the system has stopped. `System::is_set`
was updated to reflect what it actually does: it tells whether the event loop has
started, which alone can't tell if it has stopped.

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-05 21:00:54 -03:00
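
A short sketch of how these two checks might be used together, written against the actix-rt 1.1 API; treat the exact setup here as an assumption rather than documentation:

```rust
use actix_rt::{Arbiter, System};

fn main() {
    let sys = System::new("is-running-demo");

    // `System::is_set` only says that an event loop has been started for this thread.
    assert!(System::is_set());

    Arbiter::spawn(async {
        // Inside a running arbiter the live event loop is observable.
        assert!(Arbiter::is_running());
        System::current().stop();
    });

    sys.run().unwrap();
}
```
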
Jonathas-Conceicao
69e8df9d62 actix-rt: Run rustfmt
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-04-05 21:00:54 -03:00
Yuki Okushi
9addf1a36b Merge pull request #125 from actix/fix/noisy-check
fix noisy check warning
2020-04-05 13:20:25 +09:00
Rob Ede
187a58472d fix noisy check warning 2020-04-04 23:57:52 +01:00
Nikolay Kim
30aa0b7bb6 add serde support to bytestring 2020-03-30 11:54:40 +06:00
Yuki Okushi
e775d08d76 Merge pull request #122 from actix/JohnTitor-patch-1
Upload coverage on PRs
2020-03-18 05:31:59 +09:00
Yuki Okushi
d5f95b54b7 Upload coverage on PRs 2020-03-18 05:03:37 +09:00
Yuki Okushi
904f90abc2 Merge pull request #121 from actix/revert-115-JohnTitor-patch-2
Revert "Disable windows-mingw builder temporarily"
2020-03-16 18:06:42 +09:00
Yuki Okushi
950c73077c Revert "Disable windows-mingw builder temporarily" 2020-03-16 17:31:10 +09:00
Yuki Okushi
732731a9c8 Merge pull request #120 from kornelski/err
std Error for BlockingError
2020-03-14 00:14:42 +09:00
Kornel Lesiński
0dd5a7ce1d std Error for BlockingError
#93
2020-03-13 12:35:20 +00:00
Yuki Okushi
7105091e51 Merge pull request #119 from JohnTitor/futures
Minimize `futures-*` dependencies
2020-03-13 05:12:37 +09:00
Yuki Okushi
08959dfc21 actix-tracing: Minimize futures-util dependencies 2020-03-12 07:13:32 +09:00
Yuki Okushi
2792433ad6 actix-codec: Minimize futures-* dependencies 2020-03-12 07:13:32 +09:00
Yuki Okushi
437a7b05c6 actix-rt: Fix build 2020-03-12 07:13:32 +09:00
Yuki Okushi
3d125c5381 actix-testing: Remove unused deps 2020-03-12 07:13:32 +09:00
Yuki Okushi
fbf7d6ef33 Update examples 2020-03-12 07:13:32 +09:00
Yuki Okushi
e6b6f08369 actix-utils: Minimize futures-* dependencies 2020-03-12 07:13:32 +09:00
Yuki Okushi
4e806b3e3f actix-tls: Minimize futures-* dependencies 2020-03-12 07:13:31 +09:00
Yuki Okushi
f5b07053fc actix-server: Minimize futures-* dependencies 2020-03-12 07:13:31 +09:00
Yuki Okushi
dd3bec83bf actix-ioframe: Minimize futures-* dependencies 2020-03-12 07:13:31 +09:00
Yuki Okushi
f955e49930 actix-connect: Minimize futures-* dependencies 2020-03-12 04:22:38 +09:00
Yuki Okushi
4be11b541b Merge pull request #117 from actix/new-connect
Release actix-http v2.0.0-alpha.2
2020-03-08 15:13:52 +09:00
Yuki Okushi
baba533407 Update actix-http dependency 2020-03-08 14:38:07 +09:00
Yuki Okushi
2bf50826b0 Bump up to 2.0.0-alpha.2 2020-03-08 14:37:33 +09:00
Yuki Okushi
41b2a3b2e2 Merge pull request #116 from Jonathas-Conceicao/topic/upgrade_trust_dns
actix-connect: Upgrade versions of trust-dns
2020-03-08 14:31:07 +09:00
Jonathas-Conceicao
7fdd4a1118 actix-connect: Upgrade versions of trust-dns
- `Address` trait is now required to have static lifetime;
- `start_resolver` and `start_default_resolver` are now `async` and may return
  a `ConnectError`;

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-03-07 14:52:41 -03:00
Jonathas-Conceicao
cb30f9e86a actix-connect: Run cargo fmt
Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-03-07 14:37:39 -03:00
Yuki Okushi
873f69be51 Merge pull request #115 from actix/JohnTitor-patch-2
Disable windows-mingw builder temporarily
2020-03-06 14:11:50 +09:00
Yuki Okushi
0967061f30 Merge pull request #114 from actix/JohnTitor-patch-1
Unpin quote version
2020-03-06 14:11:28 +09:00
Yuki Okushi
59902cb3a3 Disable windows-mingw builder temporarily 2020-03-06 13:48:55 +09:00
Yuki Okushi
857e50120b Unpin quote version 2020-03-06 13:45:21 +09:00
Yuki Okushi
36a2edf1cd Merge pull request #111 from dunnock/master
Fix build with failing quote
2020-03-05 23:05:19 +09:00
Maksym Vorobiov
346bd072d3 fix build with failing quote 2020-03-05 14:58:44 +02:00
Yuki Okushi
8d3d58b3b7 Merge pull request #110 from Aaron1011/fix/better-pin
Replace calls to `Pin::new_unchecked` with `pin_project`.
2020-03-05 21:52:55 +09:00
Aaron Hill
c41b5d8dd4 Replace calls to Pin::new_unchecked with pin_project.
This is a breaking change, as it changes some public methods to take
`Pin<&mut Self>` rather than `&mut self`.

This brings these methods into line with `Stream::poll_next`, which also
takes a `Pin<&mut Self>`
2020-03-04 12:08:52 -05:00
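
As a generic illustration of the pattern adopted here (not the actual actix-net code), `pin_project` lets a wrapper project its pinned field safely instead of calling `Pin::new_unchecked`:

```rust
use std::pin::Pin;
use std::task::{Context, Poll};

use futures_core::Stream;
use pin_project::pin_project;

#[pin_project]
struct Wrapper<S> {
    #[pin]
    inner: S,
}

impl<S: Stream> Stream for Wrapper<S> {
    type Item = S::Item;

    // Taking `Pin<&mut Self>` (like `Stream::poll_next`) means the pinned field
    // can be projected and polled without any unsafe `Pin::new_unchecked` calls.
    fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
        self.project().inner.poll_next(cx)
    }
}
```
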
Yuki Okushi
693d5132a9 Merge pull request #109 from JohnTitor/new-tls
actix-tls: Bump up to 2.0.0-alpha.1
2020-03-03 22:29:08 +09:00
Yuki Okushi
f7dac3feb4 Bump up to 2.0.0-alpha.1 2020-03-03 19:47:40 +09:00
Yuki Okushi
ebc11d03f2 Merge pull request #108 from JohnTitor/new-connect
Release `actix-connect` v2.0.0-alpha.1
2020-03-03 18:33:08 +09:00
Yuki Okushi
e3ad5de270 Update actix-connect dependency 2020-03-03 17:24:41 +09:00
Yuki Okushi
91118bb2ce Bump up to 2.0.0-alpha.1 2020-03-03 17:24:25 +09:00
Yuki Okushi
6628688bcf Merge pull request #107 from JohnTitor/rustls-017
Update `rustls` and `tokio-rustls`
2020-03-01 23:48:13 +09:00
Yuki Okushi
b9567359fd actix-tls: Update rustls and tokio-rustls 2020-03-01 12:08:14 +09:00
Yuki Okushi
7dbc0264b1 actix-connect: Update rustls and tokio-rustls 2020-03-01 12:08:14 +09:00
Erich Gubler
1b7c969f6a actix-rt: minimize futures dependencies to futures-{channel,util} with default features off (#104)
* build(deps): minimize `futures` deps by using `futures-channel` and `futures-util` directly

* style(actix-rt): enforce spaces around equals in `Cargo.toml`
2020-02-27 01:15:21 +09:00
Jonathas-Conceicao
f1685d8253 Add Arbiter::local_join associated function
The Arbiter::local_join function can be used to await futures spawned
on the current arbiter.

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-02-26 12:59:46 -03:00
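
A rough sketch of the intended usage, assuming `local_join` is a no-argument associated function whose future resolves once everything spawned on the arbiter has completed (the exact signature is an assumption based only on the commit message):

```rust
use actix_rt::{Arbiter, System};

fn main() {
    System::new("local-join-demo").block_on(async {
        Arbiter::spawn(async {
            // background work spawned on the current arbiter
        });

        // Assumed API: wait for all futures spawned on this arbiter to finish.
        Arbiter::local_join().await;
    });
}
```
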
Jonathas-Conceicao
e3b6a33b97 Add integration tests
These initial tests validate basic usage with timed futures for:
- `System::block_on`;
- `Arbiter::new`;
- `Arbiter::stop`;
- `Arbiter::join`;

Signed-off-by: Jonathas-Conceicao <jadoliveira@inf.ufpel.edu.br>
2020-02-26 12:59:46 -03:00
Yuki Okushi
13b503435f Merge pull request #106 from JohnTitor/server-102
Release actix-server 1.0.2
2020-02-26 20:53:00 +09:00
Yuki Okushi
98f0290f65 actix-server: Bump up to 1.0.2 2020-02-26 19:48:52 +09:00
Yuki Okushi
b8f66f5e7f Update changelog 2020-02-26 19:48:41 +09:00
Yuki Okushi
dd59ee498e Add FIXME comment 2020-02-26 19:48:27 +09:00
Dany Laporte
83320efa31 Avoid error by register() on Windows (#103) 2020-02-26 18:40:31 +09:00
Yuki Okushi
c69bc11e3e Merge pull request #105 from actix/bench
Add action to check benchmark
2020-02-26 17:33:37 +09:00
Yuki Okushi
aad5c42ad7 Add action to check benchmark 2020-02-26 17:11:46 +09:00
Maxim Vorobjov
4d37858fc6 Benchmarks for actix-service: focused around UnsafeCell usage (#98)
* add benchmark comparing unsafecell vs refcell

* fix syntax

* add benches for and_then implementation options

* repeat benches to stabilize
2020-02-26 16:45:23 +09:00
Yuki Okushi
d402f08bb5 Merge pull request #102 from JohnTitor/single-import
Remove single import
2020-02-25 19:11:04 +09:00
Yuki Okushi
fa25e30427 Remove single import 2020-02-25 18:41:15 +09:00
Bo Yao
602db1779e Expose is_set (#99)
* Expose is_set

* Update doc and changes.md
2020-02-25 02:55:02 -03:00
Yuki Okushi
4f2910c6b3 Merge pull request #96 from actix/JohnTitor-patch-1
Disable coverage for PRs
2020-02-15 01:55:20 +09:00
Yuki Okushi
9f7d6bc068 Disable coverage for PRs 2020-02-14 07:30:21 +09:00
Yuki Okushi
6908b58943 Merge pull request #92 from actix/bye-travis
Move script from Travis to Actions
2020-02-02 06:28:42 +09:00
Yuki Okushi
043057ecbd Fix import scopes 2020-02-01 23:32:08 +09:00
Yuki Okushi
e12bf9200b Clean up metadata 2020-01-31 02:21:25 +09:00
Yuki Okushi
03d431e663 Add badges on README 2020-01-31 00:01:47 +09:00
Yuki Okushi
f0d352604e Remove travis config 2020-01-31 00:01:34 +09:00
Yuki Okushi
2f67e4f563 Use markdown format 2020-01-31 00:01:24 +09:00
Yuki Okushi
d1155d60ec Tweak Actions 2020-01-31 00:01:11 +09:00
Yuki Okushi
28d9c6a760 Merge pull request #90 from actix/fix-ci
Tweak GitHub Actions
2020-01-30 00:46:21 +09:00
Yuki Okushi
a970c2c997 Remove AppVeyor config 2020-01-29 12:05:55 +09:00
Yuki Okushi
d5a6c83207 Suppress/fix clippy warnings 2020-01-29 12:05:55 +09:00
Yuki Okushi
ee0db9a617 Tweak GitHub Actions 2020-01-29 12:05:55 +09:00
zero-systems
e5b5df1261 Optimize vector fill in builder. (#89)
* optimize vector fill
2020-01-22 06:35:22 +09:00
Nikolay Kim
dbfa13d6be Fixed unsoundness in .and_then()/.then() service combinators 2020-01-16 16:58:11 -08:00
Nikolay Kim
e7c2439543 prep release 2020-01-15 13:35:07 -08:00
Nikolay Kim
3116db5168 revert 1.0.3 changes 2020-01-15 13:24:38 -08:00
Nikolay Kim
5940731ef0 Fix actix-service 1.0.3 compatibility 2020-01-15 11:58:06 -08:00
Rajasekharan Vengalil
aed5fecc8a Add support for tokio tracing for actix Service. (#86)
* Add support for tokio tracing for actix Service.

* Address comments

* Change trace's return type to ApplyTransform

* Remove redundant type args

* Remove reference to MakeSpan from docs
2020-01-15 11:43:52 -08:00
Nikolay Kim
a751899aad Fixed unsoundness in AndThenService impl #83 2020-01-15 11:40:15 -08:00
Nikolay Kim
fa800aeba3 Fix AsRef<str> impl 2020-01-14 15:06:02 -08:00
Nikolay Kim
2f89483635 Merge branch 'master' of github.com:actix/actix-net 2020-01-14 00:42:29 -08:00
Nikolay Kim
3048073919 Add PartialEq<T: AsRef<str>>, AsRef<[u8]> impls 2020-01-13 11:58:31 +06:00
amosonn
4bbba803c1 Fix Service documentation (#85) 2020-01-12 07:44:01 +09:00
Sven-Hendrik Haase
4dcdeb6795 Merge pull request #84 from currency-engineering/master
Minor grammatical fix to docs.
2020-01-10 15:28:19 +01:00
Eric Findlay
3b4f222242 Minor grammatical fix to docs. 2020-01-10 20:52:49 +09:00
Nikolay Kim
7c5fa25b23 Add into_service helper function 2020-01-08 18:31:50 +06:00
Nikolay Kim
3551d6674d Add Clone impl for condition::Waiter 2020-01-08 11:18:56 +06:00
Nikolay Kim
9f00daea80 add Condition and Pool 2020-01-08 10:59:27 +06:00
Nikolay Kim
7dddeab2a8 Add ResourceDef::resource_path_named() path generation method 2019-12-31 18:02:43 +06:00
Nikolay Kim
dcbcc40da2 Revert "Support named parameters for ResourceDef::resource_path() in form of ((&k, &v), ...)"
This reverts commit b0d44198ba.
2019-12-31 15:14:53 +06:00
Nikolay Kim
b0d44198ba Support named parameters for ResourceDef::resource_path() in form of ((&k, &v), ...) 2019-12-31 14:53:30 +06:00
Nikolay Kim
974bd6b01e leak string instead of rc 2019-12-31 12:04:35 +06:00
Nikolay Kim
5779da0f49 refactor service and state manahement 2019-12-29 13:42:42 +06:00
Nikolay Kim
1918c8d4f8 rename .run to .start() 2019-12-29 10:07:46 +06:00
Nikolay Kim
e21c58930b Add impl IntoPattern for &String 2019-12-25 21:34:14 +04:00
Nikolay Kim
59c5e9be6a Use IntoPattern for RouterBuilder::path() 2019-12-25 21:01:07 +04:00
Nikolay Kim
a2a9d9764d introduce IntoPattern trait 2019-12-25 19:54:20 +04:00
Nikolay Kim
bf0a9d2f6e Add IntoPatterns trait 2019-12-25 15:34:21 +04:00
Nikolay Kim
119027f822 fmt 2019-12-25 15:10:13 +04:00
Nikolay Kim
0fe8038d23 allow specify set of resource patters 2019-12-25 15:10:01 +04:00
Nikolay Kim
b599bc4a0c map_config() and unit_config() accepts IntoServiceFactory type 2019-12-22 16:30:49 +04:00
Nikolay Kim
a80e1f8370 fix new() method and make from_static and from_bytes_unchecked methods const 2019-12-22 16:24:28 +04:00
Nikolay Kim
5fe759cc02 Merge branch 'master' of github.com:actix/actix-net 2019-12-20 09:15:19 +06:00
Nikolay Kim
05549f0b42 Add methods to check LocalWaker registration state 2019-12-20 09:13:11 +06:00
Yuki Okushi
b1430eaded Run tests for all features as possible (#78) 2019-12-19 16:31:32 +09:00
Nikolay Kim
0d3f9e74c5 Use .advance() intead of .split_to() 2019-12-19 09:50:31 +06:00
Nikolay Kim
cab73791ed pin trsut-dns-proto 2019-12-15 13:04:26 +06:00
Nikolay Kim
a7ac1a76ed add license files to actix-macros 2019-12-14 23:01:55 +06:00
Nikolay Kim
37bedff6fb use parking_lot 0.10 2019-12-12 06:57:40 +06:00
Nikolay Kim
33fd6adc11 better InOrder test 2019-12-12 06:56:45 +06:00
Nikolay Kim
4305cdba2c Revert InOrder service changes 2019-12-11 23:10:02 +06:00
Nikolay Kim
52ecb4bcc5 Add oneshot::Sender::is_canceled() method 2019-12-11 20:52:57 +06:00
Nikolay Kim
b28f32e82c Allow to create framed::Dispatcher with custom mpsc::Receiver 2019-12-11 20:23:14 +06:00
Nikolay Kim
081205a02f Disconnect callback accepts owned state 2019-12-11 18:57:43 +06:00
Nikolay Kim
8bb81c0768 optimize InOrder service 2019-12-11 18:55:53 +06:00
Nikolay Kim
c7a8743bf9 remove E param 2019-12-11 16:44:09 +06:00
Nikolay Kim
f26fcc703b prep release 2019-12-11 14:56:05 +06:00
Nikolay Kim
ce4587df82 prepare actix-tls release 2019-12-11 14:53:58 +06:00
Nikolay Kim
9957f28137 prepare actix-testing release 2019-12-11 14:49:26 +06:00
Nikolay Kim
9d84d14ef4 update deps 2019-12-11 14:47:30 +06:00
Nikolay Kim
60bfa1bfb1 prepare actix-server release 2019-12-11 14:43:26 +06:00
Nikolay Kim
2c81c22b3e refactor ioframe dispatcher 2019-12-11 14:36:11 +06:00
Nikolay Kim
dded482514 allow to close mpsc sender 2019-12-11 14:36:00 +06:00
Nikolay Kim
631cb86947 refactor framed and stream dispatchers 2019-12-11 12:42:07 +06:00
Nikolay Kim
2e5e69c9ba Simplify oneshot and mpsc implementations 2019-12-11 11:28:09 +06:00
Nikolay Kim
e315cf2893 prep actix-rt release; update deps 2019-12-11 10:34:50 +06:00
Nikolay Kim
13fd615966 actix-macros release 2019-12-11 10:32:01 +06:00
Nikolay Kim
c094f84b85 prepare actix-service release 2019-12-11 10:29:34 +06:00
Nikolay Kim
25012d290a update actix-codec dependencies 2019-12-11 10:23:01 +06:00
Nikolay Kim
32202188cc prepare actix-codec release 2019-12-11 10:18:11 +06:00
Nikolay Kim
bf734a31dc update docs 2019-12-10 21:34:51 +06:00
Nikolay Kim
d29e7c4ba6 Merge branch 'master' of github.com:actix/actix-net 2019-12-10 21:14:18 +06:00
Nikolay Kim
7163e2c2a2 update doc strings 2019-12-10 21:14:06 +06:00
Nikolay Kim
1d810b4561 re-export AlpnError 2019-12-10 12:15:27 +06:00
daxpedda
0913badd61 Macro improvements. (#74)
* Macro improvements.

* Fix usage in `fn main`.
2019-12-10 08:47:35 +06:00
Nikolay Kim
8b3062cd6e Fix buffer remaining capacity calcualtion 2019-12-09 21:50:36 +06:00
Nikolay Kim
35218a4df1 add Clone impl for Apply service 2019-12-09 14:07:20 +06:00
Nikolay Kim
d47f1fb730 prepare actix-service release 2019-12-08 19:49:35 +06:00
Nikolay Kim
1ad0bbfb7f rename fn service helpers 2019-12-08 19:05:05 +06:00
Nikolay Kim
c38a25f102 fix hash impl 2019-12-07 11:51:47 +06:00
Nikolay Kim
110457477a update changes 2019-12-07 11:04:53 +06:00
Nikolay Kim
a899b1e04d bump actix-ioframe version 2019-12-07 10:55:54 +06:00
Nikolay Kim
393cf1ab25 add unsafe from_bytes_unchecked 2019-12-07 10:48:22 +06:00
Nikolay Kim
40fbbb9c32 fix crate name 2019-12-07 10:39:33 +06:00
Nikolay Kim
99fef4f06b add helper conversions 2019-12-07 10:22:08 +06:00
Nikolay Kim
fc0825fcdd update tokio to 0.2.4 2019-12-07 10:15:26 +06:00
Nikolay Kim
6c00ab8296 add string crate 2019-12-07 09:59:39 +06:00
Nikolay Kim
cbdbc05dbd update tokio verion and prep alpha3 release 2019-12-07 09:57:43 +06:00
Yuki Okushi
5674840c01 Stop running tests for all features (#73) 2019-12-07 08:54:58 +06:00
Nikolay Kim
6f07c9d72a update trust-dns 2019-12-06 14:08:11 +06:00
Nikolay Kim
fa48ddcfa1 fix non unix signals support 2019-12-06 14:06:14 +06:00
Max Gortman
f89a992daf eager drop in then, and_then, and_then_apply_fn (#72) 2019-12-06 10:34:44 +06:00
Nikolay Kim
e670a32ff3 inclide stream feature 2019-12-06 01:34:13 +06:00
Nikolay Kim
021c742d22 use string crate from master 2019-12-06 00:10:27 +06:00
Nikolay Kim
88a60ffa66 reexport ssl types 2019-12-05 23:09:44 +06:00
Nikolay Kim
cb2845cb26 fix dependencies 2019-12-05 20:58:28 +06:00
Nikolay Kim
b18fbc98d5 move rustls and nativetls acceptor services to actix-tls 2019-12-05 20:52:37 +06:00
Nikolay Kim
3a858feaec migrate to tokio 0.2.2 2019-12-05 16:40:24 +06:00
Nikolay Kim
d49aca9595 use bitflags for internal flags; use tokio 0.2 2019-12-05 13:11:56 +06:00
Nikolay Kim
6f41b80cb4 optimize service combinators memory layout 2019-12-05 12:37:26 +06:00
Nikolay Kim
c6eb318536 Fix low/high watermark for write/read buffers; fix oneshot impl 2019-12-05 01:36:31 +06:00
Nikolay Kim
21dcc22e53 refactor server configurations 2019-12-04 21:35:27 +06:00
Nikolay Kim
de84663768 fix initial worker service state 2019-12-04 15:52:49 +06:00
Nikolay Kim
c4e2051327 refactor server worker 2019-12-04 15:12:02 +06:00
Nikolay Kim
0a4fe22003 Restore Service/Factory::apply_fn() in form of Pipeline/Factory::and_then_apply_fn() 2019-12-03 19:59:28 +06:00
Nikolay Kim
eb773c8b8c Merge branch 'master' of github.com:actix/actix-net 2019-12-03 18:34:32 +06:00
Nikolay Kim
db0bc1e156 Restore Transform::map_init_err() combinator 2019-12-03 18:32:02 +06:00
Yuki Okushi
9eb12e0467 Use GitHub Actions (#71) 2019-12-03 20:00:16 +09:00
Nikolay Kim
eb33f0ecbe add Clone for apply combinator 2019-12-03 16:15:06 +06:00
Nikolay Kim
cbc5da8625 update changes 2019-12-03 14:10:36 +06:00
Nikolay Kim
ec8dca8d69 Merge branch 'master' of github.com:actix/actix-net 2019-12-03 14:09:35 +06:00
Nikolay Kim
6a9df026e7 Add missing Clone impl for factory_fn_cfg 2019-12-03 14:05:23 +06:00
Aaron Housh
2756bedc3d Fix for non Unix OS (#69) 2019-12-03 10:07:54 +06:00
Nikolay Kim
bd4c4cda8b update threadpool 2019-12-02 22:49:02 +06:00
Nikolay Kim
c0ede65317 restore 0.1 behavior 2019-12-02 22:47:49 +06:00
Nikolay Kim
9f575418c1 clippy warnings 2019-12-02 22:30:09 +06:00
Nikolay Kim
9ed35cca7a use owned value for service factory config 2019-12-02 21:27:48 +06:00
Nikolay Kim
3385682e09 remove server feature 2019-12-02 17:04:42 +06:00
Nikolay Kim
f55f96bc77 fix dependencies 2019-12-02 11:49:42 +06:00
Nikolay Kim
a08b1eba87 update tests 2019-12-02 11:43:52 +06:00
Nikolay Kim
d81e72cf06 remove deprecaed crate 2019-12-02 11:30:52 +06:00
Nikolay Kim
9fbe6a1f6d refactor server configuration and tls support 2019-12-02 11:30:27 +06:00
Nikolay Kim
16ff283fb2 add metadata 2019-12-01 20:30:24 +06:00
Nikolay Kim
503c2feb08 re-export net primitives 2019-12-01 10:56:25 +06:00
Nikolay Kim
bec4efc699 add extra methods to pipeline 2019-11-29 13:51:00 +06:00
Nikolay Kim
5e5ae2ddec restore stream dispatcher 2019-11-29 10:41:09 +06:00
Nikolay Kim
a02064592b disable rustls 2019-11-27 21:03:26 +06:00
Nikolay Kim
af72005159 move BoxFuture to boxed mod 2019-11-27 20:59:36 +06:00
Nikolay Kim
c254bb978c allow to wait on Server until server stops; restore signal handling 2019-11-26 17:03:52 +06:00
Nikolay Kim
009f8e2e7c allow to wait server exit 2019-11-26 16:33:45 +06:00
Nikolay Kim
f5aecdee8f work around to rust#62127 2019-11-26 10:14:21 +06:00
Nikolay Kim
4546774f4e inclide fn ident to err message 2019-11-26 10:04:46 +06:00
Nikolay Kim
2cf140a869 inclide fn token to err message 2019-11-26 10:01:46 +06:00
Nikolay Kim
e76ea8e80c re-export timeout 2019-11-26 09:04:14 +06:00
Nikolay Kim
52d03fa18c use actix deps instead of tokio 2019-11-26 08:26:22 +06:00
Nikolay Kim
5efac449b1 re-export time utils 2019-11-26 08:12:16 +06:00
Nikolay Kim
4ceac79f2c add test and main macros 2019-11-25 21:49:11 +06:00
Nikolay Kim
1fddd1e75b renamed boxed service 2019-11-25 18:18:00 +06:00
Nikolay Kim
905d058454 upgrade derive_more 2019-11-25 17:54:47 +06:00
Nikolay Kim
5265714f68 prep alpha.1 release 2019-11-21 19:58:55 +06:00
Nikolay Kim
ae4394c0f2 fix uds server support 2019-11-21 00:35:44 +06:00
Nikolay Kim
d3c5518646 fix rustls acceptor 2019-11-19 18:54:36 +06:00
Nikolay Kim
3bf83c1d98 cleanup Unpin constraint; simplify Framed impl 2019-11-19 14:51:40 +06:00
Nikolay Kim
617e40a7e9 fix framed_read 2019-11-19 11:06:55 +06:00
Nikolay Kim
3105cde168 add Service impl for RefCell<S> 2019-11-19 08:45:09 +06:00
Nikolay Kim
5b74c79cf9 Simplify transform trait, remove map_init_err 2019-11-19 06:51:43 +06:00
Nikolay Kim
8bf8ad86d6 add IntoServiceFactory impl for servie_fn 2019-11-18 20:46:49 +06:00
Nikolay Kim
877f89eeb7 use service types for ssl connectors 2019-11-18 20:20:56 +06:00
Nikolay Kim
1354946460 remove pin-project; update Unpin consrtaint 2019-11-18 18:28:54 +06:00
Nikolay Kim
7404d82a9b use concrete types 2019-11-18 14:30:04 +06:00
Nikolay Kim
c1cdc9908a update deps and fix definitions 2019-11-15 16:06:44 +06:00
Yuki Okushi
be7904fd57 Fix code style (#65)
* Fix clippy warnings

* cargo fmt

* Remove redundant lifetime
2019-11-15 00:28:29 +09:00
Nikolay Kim
13049b80ca Migrate actix-net to std::future (#64)
* Migrate actix-codec, actix-rt, and actix-threadpool to std::future

* update to latest tokio alpha and futures-rs

* Migrate actix-service to std::future,

This is a squash of ~8 commits, since it included a lot of experimentation. To see the commits,
look into the semtexzv/std-future-service-tmp branch.

* update futures-rs and tokio

* Migrate actix-threadpool to std::future (#59)

* Migrate actix-threadpool to std::future

* Cosmetic refactor

- turn log::error! into log::warn! as it doesn't throw any error
- add Clone and Copy impls for Cancelled making it cheap to operate with
- apply rustfmt

* Bump up crate version to 0.2.0 and pre-fill its changelog

* Disable patching 'actix-threadpool' crate in global workspace as unnecessary

* Revert patching and fix 'actix-rt'

* Migrate actix-rt to std::future (#47)

* remove Pin from Service::poll_ready(); simplify combinators api; make code compile

* disable tests

* update travis config

* refactor naming

* drop IntoFuture trait

* Migrate actix-server to std::future (#50)

Still not finished and more of a WIP; this is an aggregation of several commits, which
can be found in the semtexzv/std-future-server-tmp branch

* update actix-server

* rename Factor to ServiceFactory

* start server worker in start mehtod

* update actix-utils

* remove IntoTransform trait

* Migrate actix-server::ssl::nativetls to std futures (#61)

* Refactor 'nativetls' module

* Migrate 'actix-server-config' to std futures

- remove "uds" feature
- disable features by default

* Switch NativeTlsAcceptor to use 'tokio-tls' crate

* Bikeshed features names and remove unnecessary dependencies for 'actix-server-config' crate

* update openssl impl

* migrate actix-connect to std::future

* migrate actix-ioframe to std::future

* update version to alpha.1

* fix boxed service

* migrate server rustls support

* migratte openssl and rustls connecttors

* store the thread's handle with arbiter (#62)

* update ssl connect tests

* restore service tests

* update readme
2019-11-14 18:38:24 +06:00
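
To make the shape of the migrated API concrete, a service after this change implements plain `std::future`-based `poll_ready`/`call` methods, roughly as below (a sketch of the actix-service 1.x trait, not code from this PR):

```rust
use std::future::{ready, Ready};
use std::task::{Context, Poll};

use actix_service::Service;

/// Trivial service that echoes its input.
struct Echo;

impl Service for Echo {
    type Request = String;
    type Response = String;
    type Error = ();
    type Future = Ready<Result<Self::Response, Self::Error>>;

    // No `Pin` around `self` any more: readiness is a plain poll method.
    fn poll_ready(&mut self, _cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        Poll::Ready(Ok(()))
    }

    fn call(&mut self, req: Self::Request) -> Self::Future {
        ready(Ok(req))
    }
}
```
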
Nikolay Kim
9fa2a36b4e prepare actix-rt release 2019-11-14 17:33:28 +06:00
Ivan Ladelshchikov
ed5023128b store the thread's handle with arbiter (#60) 2019-11-14 15:07:33 +06:00
Nikolay Kim
2e8c2c7733 Re-register task on every future poll 2019-10-14 17:55:52 +06:00
Nikolay Kim
115e82329f fix arbiter thread panic message 2019-10-14 11:19:08 +06:00
Nikolay Kim
0b0060fe47 update deps 2019-10-14 10:37:48 +06:00
Nikolay Kim
35e32d8e55 prepare actix-testing release 2019-10-14 10:30:27 +06:00
Nikolay Kim
9982a9498d register current task in counters available method. 2019-10-08 15:02:43 +06:00
Nikolay Kim
fa72975f34 extra trace logging 2019-10-08 14:46:22 +06:00
Sven-Hendrik Haase
fe5de2510d Merge pull request #56 from actix/fix-52
Add an error message if we receive a non-hostname-based dest
2019-10-04 13:48:20 +02:00
Yuki Okushi
e3155957a8 Prepare actix-server release (#55) 2019-10-04 17:36:23 +09:00
Sven-Hendrik Haase
f6f9e1fcdb Add an error message if we receive a non-hostname-based dest
This is more helpful than an unwrap and at least points users at the right location.
Upstream issue is https://github.com/briansmith/webpki/issues/54
2019-10-04 07:30:13 +02:00
Yuki Okushi
2667850d60 Prepare actix-server-config release (#54)
* Prepare actix-server-config release

* Bump up actix-server-config to 0.2.0
2019-10-04 06:13:33 +06:00
Yuki Okushi
fba2002702 Prepare actix-connect release (#53) 2019-10-04 06:21:59 +09:00
Jerome Gravel-Niquet
e733c562d9 Update rustls, tokio-rustls and webpki across the board (#42)
* Update rustls, tokio-rustls and webpki across the board

* bump minimum rust version to 1.37

* updated readme and changelogs to reflect changes and minimum required rust version
2019-10-04 03:32:32 +09:00
Yuki Okushi
8f05986a9f Use map() instead of and_then() (#51) 2019-10-03 14:55:44 +09:00
Nikolay Kim
aa9bbe2114 prepare actix-ioframe release 2019-09-25 10:47:06 +06:00
Nikolay Kim
4837a901e2 prepare actix-server release 2019-09-25 10:35:15 +06:00
Nikolay Kim
a02ff17cb1 remove actix-tower from workspace 2019-09-25 10:11:17 +06:00
Nikolay Kim
dbf566928c drop tower intergration 2019-09-25 10:01:08 +06:00
Nikolay Kim
ca982b2467 update workspace deps for tests 2019-09-25 10:00:54 +06:00
Nikolay Kim
c859d13e3b use actix-testing instead of test server 2019-09-25 09:51:28 +06:00
Nikolay Kim
41e49e8093 update changes 2019-09-25 09:32:33 +06:00
Nikolay Kim
715a770d7a deprecate test server 2019-09-25 09:31:52 +06:00
Nikolay Kim
5469d8c910 prep actix-testing release 2019-09-25 09:26:12 +06:00
Nikolay Kim
8be5f773f4 add actix-testing crate 2019-09-17 16:04:20 +06:00
karlri
b686b4c34e Feature uds: Add listen_uds to ServerBuilder (#43)
Allows directly passing a Unix listener instead of a path. Useful,
for example, when running as a daemon under systemd with the systemd
crate.
2019-09-16 11:07:46 +06:00
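
A sketch of what passing a pre-bound listener looks like (Unix only; written against the later actix-server 1.x surface, and the exact `listen_uds` signature and stream type are assumptions):

```rust
use std::io;
use std::os::unix::net::UnixListener;

use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> io::Result<()> {
    // e.g. a socket created up front, or one inherited from systemd
    let listener = UnixListener::bind("/tmp/uds-demo.sock")?;

    Server::build()
        .listen_uds("uds-demo", listener, || {
            // Each accepted connection is handed to this (do-nothing) service.
            fn_service(|_stream: actix_rt::net::UnixStream| async { Ok::<_, ()>(()) })
        })?
        .workers(1)
        .run()
        .await
}
```
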
Nikolay Kim
34a7b7f05a add TcpStreamService 2019-09-05 16:34:48 -07:00
186 changed files with 9830 additions and 11798 deletions

@@ -1,41 +0,0 @@
environment:
  global:
    PROJECT_NAME: actix-net
  matrix:
    # Stable channel
    - TARGET: i686-pc-windows-msvc
      CHANNEL: stable
    - TARGET: x86_64-pc-windows-gnu
      CHANNEL: stable
    - TARGET: x86_64-pc-windows-msvc
      CHANNEL: stable
    # Nightly channel
    - TARGET: i686-pc-windows-msvc
      CHANNEL: nightly
    - TARGET: x86_64-pc-windows-gnu
      CHANNEL: nightly
    - TARGET: x86_64-pc-windows-msvc
      CHANNEL: nightly

# Install Rust and Cargo
# (Based on from https://github.com/rust-lang/libc/blob/master/appveyor.yml)
install:
  - ps: >-
      If ($Env:TARGET -eq 'x86_64-pc-windows-gnu') {
        $Env:PATH += ';C:\msys64\mingw64\bin'
      } ElseIf ($Env:TARGET -eq 'i686-pc-windows-gnu') {
        $Env:PATH += ';C:\MinGW\bin'
      }
  - curl -sSf -o rustup-init.exe https://win.rustup.rs
  - rustup-init.exe --default-host %TARGET% --default-toolchain %CHANNEL% -y
  - set PATH=%PATH%;C:\Users\appveyor\.cargo\bin
  - rustc -Vv
  - cargo -V

# 'cargo test' takes care of building for us, so disable Appveyor's build stage.
build: false

# Equivalent to Travis' `script` phase
test_script:
  - cargo clean
  - cargo test

.github/PULL_REQUEST_TEMPLATE.md

@@ -0,0 +1,24 @@
## PR Type
<!-- What kind of change does this PR make? -->
<!-- Bug Fix / Feature / Refactor / Code Style / Other -->
INSERT_PR_TYPE
## PR Checklist
Check your PR fulfills the following:
<!-- For draft PRs check the boxes as you complete them. -->
- [ ] Tests for the changes have been added / updated.
- [ ] Documentation comments have been added / updated.
- [ ] A changelog entry has been made for the appropriate packages.
- [ ] Format code with the latest stable rustfmt
## Overview
<!-- Describe the current and new behavior. -->
<!-- Emphasize any breaking changes. -->
<!-- If this PR fixes or closes an issue, reference it here. -->
<!-- Closes #000 -->

.github/workflows/clippy-fmt.yml

@@ -0,0 +1,34 @@
on:
  pull_request:
    types: [opened, synchronize, reopened]

name: Clippy and rustfmt Check

jobs:
  clippy_check:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2

      - uses: actions-rs/toolchain@v1
        with:
          toolchain: stable
          components: rustfmt
          profile: minimal
          override: true
      - name: Check with rustfmt
        uses: actions-rs/cargo@v1
        with:
          command: fmt
          args: --all -- --check

      - uses: actions-rs/toolchain@v1
        with:
          toolchain: nightly
          components: clippy
          profile: minimal
          override: true
      - name: Check with Clippy
        uses: actions-rs/clippy-check@v1
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          args: --workspace --tests

.github/workflows/linux.yml

@@ -0,0 +1,82 @@
name: CI (Linux)

on:
  pull_request:
    types: [opened, synchronize, reopened]
  push:
    branches:
      - master
      - '1.0'

jobs:
  build_and_test:
    strategy:
      fail-fast: false
      matrix:
        version:
          - 1.46.0
          - stable
          - nightly

    name: ${{ matrix.version }} - x86_64-unknown-linux-gnu
    runs-on: ubuntu-latest

    steps:
      - uses: actions/checkout@v2

      - name: Install ${{ matrix.version }}
        uses: actions-rs/toolchain@v1
        with:
          toolchain: ${{ matrix.version }}-x86_64-unknown-linux-gnu
          profile: minimal
          override: true

      - name: Generate Cargo.lock
        uses: actions-rs/cargo@v1
        with:
          command: generate-lockfile
      - name: Cache cargo dirs
        uses: actions/cache@v2
        with:
          path:
            ~/.cargo/registry
            ~/.cargo/git
            ~/.cargo/bin
          key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-trimmed-${{ hashFiles('**/Cargo.lock') }}
      - name: Cache cargo build
        uses: actions/cache@v2
        with:
          path: target
          key: ${{ matrix.version }}-x86_64-unknown-linux-gnu-cargo-build-trimmed-${{ hashFiles('**/Cargo.lock') }}

      - name: check build
        uses: actions-rs/cargo@v1
        with:
          command: check
          args: --workspace --bins --examples --tests

      - name: tests
        uses: actions-rs/cargo@v1
        timeout-minutes: 40
        with:
          command: test
          args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture

      - name: Generate coverage file
        if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
        run: |
          cargo install cargo-tarpaulin
          cargo tarpaulin --out Xml --workspace
      - name: Upload to Codecov
        if: matrix.version == 'stable' && (github.ref == 'refs/heads/master' || github.event_name == 'pull_request')
        uses: codecov/codecov-action@v1
        with:
          file: cobertura.xml

      - name: Clear the cargo caches
        run: |
          rustup update stable
          rustup override set stable
          cargo install cargo-cache --no-default-features --features ci-autoclean
          cargo-cache

.github/workflows/macos.yml

@@ -0,0 +1,43 @@
name: CI (macOS)

on:
  pull_request:
    types: [opened, synchronize, reopened]
  push:
    branches:
      - master
      - '1.0'

jobs:
  build_and_test:
    strategy:
      fail-fast: false
      matrix:
        version:
          - stable
          - nightly

    name: ${{ matrix.version }} - x86_64-apple-darwin
    runs-on: macos-latest

    steps:
      - uses: actions/checkout@v2

      - name: Install ${{ matrix.version }}
        uses: actions-rs/toolchain@v1
        with:
          toolchain: ${{ matrix.version }}-x86_64-apple-darwin
          profile: minimal
          override: true

      - name: check build
        uses: actions-rs/cargo@v1
        with:
          command: check
          args: --workspace --bins --examples --tests

      - name: tests
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture

.github/workflows/windows-mingw.yml

@@ -0,0 +1,45 @@
name: CI (Windows-mingw)

on:
  pull_request:
    types: [opened, synchronize, reopened]
  push:
    branches:
      - master
      - '1.0'

jobs:
  build_and_test:
    strategy:
      fail-fast: false
      matrix:
        version:
          - stable
          - nightly

    name: ${{ matrix.version }} - x86_64-pc-windows-gnu
    runs-on: windows-latest

    steps:
      - uses: actions/checkout@v2

      - name: Install ${{ matrix.version }}
        uses: actions-rs/toolchain@v1
        with:
          toolchain: ${{ matrix.version }}-x86_64-pc-windows-gnu
          profile: minimal
          override: true

      - name: Install MSYS2
        uses: msys2/setup-msys2@v2

      - name: Install packages
        run: |
          msys2 -c 'pacman -Sy --noconfirm pacman'
          msys2 -c 'pacman --noconfirm -S base-devel pkg-config'

      - name: check build
        uses: actions-rs/cargo@v1
        with:
          command: check
          args: --workspace --bins --examples --tests

.github/workflows/windows.yml

@@ -0,0 +1,69 @@
name: CI (Windows)

on:
  pull_request:
    types: [opened, synchronize, reopened]
  push:
    branches:
      - master
      - '1.0'

env:
  VCPKGRS_DYNAMIC: 1

jobs:
  build_and_test:
    strategy:
      fail-fast: false
      matrix:
        version:
          - stable
          - nightly
        target:
          - x86_64-pc-windows-msvc
          - i686-pc-windows-msvc

    name: ${{ matrix.version }} - ${{ matrix.target }}
    runs-on: windows-latest

    steps:
      - uses: actions/checkout@v2

      - name: Install ${{ matrix.version }}
        uses: actions-rs/toolchain@v1
        with:
          toolchain: ${{ matrix.version }}-${{ matrix.target }}
          profile: minimal
          override: true

      - name: Install OpenSSL (x64)
        if: matrix.target == 'x86_64-pc-windows-msvc'
        run: |
          vcpkg integrate install
          vcpkg install openssl:x64-windows
          Get-ChildItem C:\vcpkg\installed\x64-windows\bin
          Get-ChildItem C:\vcpkg\installed\x64-windows\lib
          Copy-Item C:\vcpkg\installed\x64-windows\bin\libcrypto-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libcrypto.dll
          Copy-Item C:\vcpkg\installed\x64-windows\bin\libssl-1_1-x64.dll C:\vcpkg\installed\x64-windows\bin\libssl.dll

      - name: Install OpenSSL (x86)
        if: matrix.target == 'i686-pc-windows-msvc'
        run: |
          vcpkg integrate install
          vcpkg install openssl:x86-windows
          Get-ChildItem C:\vcpkg\installed\x86-windows\bin
          Get-ChildItem C:\vcpkg\installed\x86-windows\lib
          Copy-Item C:\vcpkg\installed\x86-windows\bin\libcrypto-1_1.dll C:\vcpkg\installed\x86-windows\bin\libcrypto.dll
          Copy-Item C:\vcpkg\installed\x86-windows\bin\libssl-1_1.dll C:\vcpkg\installed\x86-windows\bin\libssl.dll

      - name: check build
        uses: actions-rs/cargo@v1
        with:
          command: check
          args: --workspace --bins --examples --tests

      - name: tests
        uses: actions-rs/cargo@v1
        with:
          command: test
          args: --workspace --exclude=actix-tls --no-fail-fast -- --nocapture

.gitignore

@@ -12,3 +12,5 @@ guide/build/
# These are backup files generated by rustfmt
**/*.rs.bk
.idea

@@ -1,50 +0,0 @@
language: rust
sudo: required
dist: trusty

cache:
  cargo: true
  apt: true

matrix:
  include:
    - rust: stable
    - rust: beta
    - rust: 1.36.0
    - rust: nightly-2019-06-15
  allow_failures:
    - rust: nightly-2019-06-15

env:
  global:
    - RUSTFLAGS="-C link-dead-code"
    - OPENSSL_VERSION=openssl-1.0.2

before_install:
  - sudo add-apt-repository -y ppa:0k53d-karl-f830m/openssl
  - sudo apt-get update -qq
  - sudo apt-get install -y openssl libssl-dev libelf-dev libdw-dev cmake gcc binutils-dev libiberty-dev

before_cache: |
  if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-06-15" ]]; then
    RUSTFLAGS="--cfg procmacro2_semver_exempt" cargo install --version 0.6.11 cargo-tarpaulin
  fi

# Add clippy
before_script:
  - export PATH=$PATH:~/.cargo/bin

script:
  - |
    if [[ "$TRAVIS_RUST_VERSION" != "nightly-2019-06-15" ]]; then
      cargo clean
      cargo test --all --all-features -- --nocapture
    fi

after_success:
  - |
    if [[ "$TRAVIS_RUST_VERSION" == "nightly-2019-06-15" ]]; then
      taskset -c 0 cargo tarpaulin --all --all-features --out Xml
      echo "Uploaded code coverage"
      bash <(curl -s https://codecov.io/bash)
    fi

@@ -1,68 +0,0 @@
# Changes
## [0.3.0] - xxx
* Split `Service` trait to separate crate
* Use new `Service<Request>` trait
## [0.2.4] - 2018-11-21
### Added
* Allow to skip name resolution stage in Connector
## [0.2.3] - 2018-11-17
### Added
* Framed::is_write_buf_empty() checks if write buffer is flushed
## [0.2.2] - 2018-11-14
### Added
* Add low/high caps to Framed
### Changed
* Refactor Connector and Resolver services
### Fixed
* Fix wrong service to socket binding
## [0.2.0] - 2018-11-08
### Added
* Timeout service
* Added ServiceConfig and ServiceRuntime for server service configuration
### Changed
* Connector has been refactored
* timer and LowResTimer renamed to time and LowResTime
* Refactored `Server::configure()` method
## [0.1.1] - 2018-10-10
### Changed
- Set actix min version - 0.7.5
- Set trust-dns min version
## [0.1.0] - 2018-10-08
* Initial impl

@@ -34,10 +34,13 @@ This Code of Conduct applies both within project spaces and in public spaces whe
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at fafhrd91@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at robjtede@icloud.com ([@robjtede]) or huyuumi@neet.club ([@JohnTitor]). The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
[@robjtede]: https://github.com/robjtede
[@JohnTitor]: https://github.com/JohnTitor
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]

@@ -1,41 +1,27 @@
[package]
name = "actix-net"
version = "0.3.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix net - framework for the composable network services for Rust"
readme = "README.md"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-net/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
edition = "2018"
[workspace]
members = [
"actix-codec",
"actix-connect",
"actix-macros",
"actix-router",
"actix-rt",
"actix-service",
"actix-server",
"actix-server-config",
"actix-test-server",
"actix-service",
"actix-threadpool",
"actix-tower",
"actix-ioframe",
"actix-tls",
"actix-tracing",
"actix-utils",
"router",
"bytestring",
]
[dev-dependencies]
actix-service = "0.4.0"
actix-codec = "0.1.1"
actix-rt = "0.2.0"
actix-server = { version="0.5.0", features=["ssl"] }
env_logger = "0.6"
futures = "0.1.25"
openssl = "0.10"
tokio-tcp = "0.1"
tokio-openssl = "0.3"
[patch.crates-io]
actix-codec = { path = "actix-codec" }
actix-macros = { path = "actix-macros" }
actix-router = { path = "actix-router" }
actix-rt = { path = "actix-rt" }
actix-server = { path = "actix-server" }
actix-service = { path = "actix-service" }
actix-threadpool = { path = "actix-threadpool" }
actix-tls = { path = "actix-tls" }
actix-tracing = { path = "actix-tracing" }
actix-utils = { path = "actix-utils" }
bytestring = { path = "bytestring" }

@@ -1,60 +1,26 @@
# Actix net [![Build Status](https://travis-ci.org/actix/actix-net.svg?branch=master)](https://travis-ci.org/actix/actix-net) [![codecov](https://codecov.io/gh/actix/actix-net/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-net) [![crates.io](https://meritbadge.herokuapp.com/actix-net)](https://crates.io/crates/actix-net) [![Join the chat at https://gitter.im/actix/actix](https://badges.gitter.im/actix/actix.svg)](https://gitter.im/actix/actix?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
# Actix Net
Actix net - framework for composable network services
> A collection of lower-level libraries for composable network services.
## Documentation & community resources
![Apache 2.0 or MIT licensed](https://img.shields.io/crates/l/actix-server)
[![codecov](https://codecov.io/gh/actix/actix-net/branch/master/graph/badge.svg)](https://codecov.io/gh/actix/actix-net)
[![Chat on Discord](https://img.shields.io/discord/771444961383153695?label=chat&logo=discord)](https://discord.gg/NWpN5mmg3x)
* [API Documentation (Development)](https://actix.rs/actix-net/actix_net/)
* [Chat on gitter](https://gitter.im/actix/actix)
* Cargo package: [actix-net](https://crates.io/crates/actix-net)
* Minimum supported Rust version: 1.36 or later
## Build statuses
| Platform | Build Status |
| ---------------- | ------------ |
| Linux | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28Linux%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Linux)") |
| macOS | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28macOS%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(macOS)") |
| Windows | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28Windows%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Windows)") |
| Windows (MinGW) | [![build status](https://github.com/actix/actix-net/workflows/CI%20%28Windows-mingw%29/badge.svg?branch=master&event=push)](https://github.com/actix/actix-net/actions?query=workflow%3A"CI+(Windows-mingw)") |
## Example
See `actix-server/examples` and `actix-tls/examples` for some basic examples.
```rust
fn main() -> io::Result<()> {
// load ssl keys
let mut builder = SslAcceptor::mozilla_intermediate(SslMethod::tls()).unwrap();
builder.set_private_key_file("./examples/key.pem", SslFiletype::PEM).unwrap();
builder.set_certificate_chain_file("./examples/cert.pem").unwrap();
let acceptor = builder.build();
let num = Arc::new(AtomicUsize::new(0));
// bind socket address and start workers. By default server uses number of
// available logical cpu as threads count. actix net start separate
// instances of service pipeline in each worker.
Server::build()
.bind(
// configure service pipeline
"basic", "0.0.0.0:8443",
move || {
let num = num.clone();
let acceptor = acceptor.clone();
// service for converting incoming TcpStream to a SslStream<TcpStream>
fn_service(move |stream: Io<tokio_tcp::TcpStream>| {
SslAcceptorExt::accept_async(&acceptor, stream.into_parts().0)
.map_err(|e| println!("Openssl error: {}", e))
})
// .and_then() combinator uses other service to convert incoming `Request` to a
// `Response` and then uses that response as an input for next
// service. in this case, on success we use `logger` service
.and_then(fn_service(logger))
// Next service counts number of connections
.and_then(move |_| {
let num = num.fetch_add(1, Ordering::Relaxed);
println!("got ssl connection {:?}", num);
future::ok(())
})
},
)?
.run()
}
```
### MSRV
This repo's Minimum Supported Rust Version (MSRV) is 1.46.0.
## License
This project is licensed under either of
* Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or [http://www.apache.org/licenses/LICENSE-2.0](http://www.apache.org/licenses/LICENSE-2.0))
@@ -64,6 +30,5 @@ at your option.
## Code of Conduct
Contribution to the actix-net crate is organized under the terms of the
Contributor Covenant, the maintainer of actix-net, @fafhrd91, promises to
intervene to uphold that code of conduct.
Contribution to the actix-net repo is organized under the terms of the Contributor Covenant.
The Actix team promises to intervene to uphold that code of conduct.

@@ -1,15 +1,59 @@
# Changes
## [0.1.2] - 2019-03-27
## Unreleased - 2021-xx-xx
## 0.4.0-beta.1 - 2020-12-28
* Replace `pin-project` with `pin-project-lite`. [#237]
* Upgrade `tokio` dependency to `1`. [#237]
* Upgrade `tokio-util` dependency to `0.6`. [#237]
* Upgrade `bytes` dependency to `1`. [#237]
[#237]: https://github.com/actix/actix-net/pull/237
## 0.3.0 - 2020-08-23
* No changes from beta 2.
## 0.3.0-beta.2 - 2020-08-19
* Remove unused type parameter from `Framed::replace_codec`.
## 0.3.0-beta.1 - 2020-08-19
* Use `.advance()` instead of `.split_to()`.
* Upgrade `tokio-util` to `0.3`.
* Improve `BytesCodec` `.encode()` performance
* Simplify `BytesCodec` `.decode()`
* Rename methods on `Framed` to better describe their use.
* Add method on `Framed` to get a pinned reference to the underlying I/O.
* Add method on `Framed` check emptiness of read buffer.
## 0.2.0 - 2019-12-10
* Use specific futures dependencies
## 0.2.0-alpha.4
* Fix buffer remaining capacity calculation
## 0.2.0-alpha.3
* Use tokio 0.2
* Fix low/high watermark for write/read buffers
## 0.2.0-alpha.2
* Migrated to `std::future`
## 0.1.2 - 2019-03-27
* Added `Framed::map_io()` method.
## [0.1.1] - 2019-03-06
## 0.1.1 - 2019-03-06
* Added `FramedParts::with_read_buffer()` method.
## [0.1.0] - 2018-12-09
## 0.1.0 - 2018-12-09
* Move codec to separate crate


@@ -1,25 +1,26 @@
[package]
name = "actix-codec"
version = "0.1.2"
version = "0.4.0-beta.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Utilities for encoding and decoding frames"
description = "Codec utilities for working with framed protocols"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-codec/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
license = "MIT OR Apache-2.0"
edition = "2018"
workspace = ".."
[lib]
name = "actix_codec"
path = "src/lib.rs"
[dependencies]
bytes = "0.4.12"
futures = "0.1.24"
tokio-io = "0.1.12"
tokio-codec = "0.1.1"
log = "0.4"
bitflags = "1.2.1"
bytes = "1"
futures-core = { version = "0.3.7", default-features = false }
futures-sink = { version = "0.3.7", default-features = false }
log = "0.4"
pin-project-lite = "0.2"
tokio = "1"
tokio-util = { version = "0.6", features = ["codec", "io"] }


@@ -1,7 +1,7 @@
use bytes::{Buf, Bytes, BytesMut};
use std::io;
use bytes::{Bytes, BytesMut};
use tokio_codec::{Decoder, Encoder};
use super::{Decoder, Encoder};
/// Bytes codec.
///
@@ -9,12 +9,12 @@ use tokio_codec::{Decoder, Encoder};
#[derive(Debug, Copy, Clone)]
pub struct BytesCodec;
impl Encoder for BytesCodec {
type Item = Bytes;
impl Encoder<Bytes> for BytesCodec {
type Error = io::Error;
#[inline]
fn encode(&mut self, item: Bytes, dst: &mut BytesMut) -> Result<(), Self::Error> {
dst.extend_from_slice(&item[..]);
dst.extend_from_slice(item.chunk());
Ok(())
}
}
@@ -27,7 +27,7 @@ impl Decoder for BytesCodec {
if src.is_empty() {
Ok(None)
} else {
Ok(Some(src.take()))
Ok(Some(src.split()))
}
}
}
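As a quick orientation, here is a rough sketch of the updated `BytesCodec` round trip in isolation, assuming actix-codec 0.4.0-beta.1 and bytes 1.0; the literal payload is illustrative:
```rust
use actix_codec::{BytesCodec, Decoder, Encoder};
use bytes::{Bytes, BytesMut};

fn main() {
    let mut codec = BytesCodec;
    let mut buf = BytesMut::new();

    // encode appends the item's bytes to the destination buffer
    codec
        .encode(Bytes::from_static(b"hello"), &mut buf)
        .unwrap();

    // decode drains everything currently buffered as a single frame
    let frame = codec.decode(&mut buf).unwrap();
    assert_eq!(frame.as_deref(), Some(&b"hello"[..]));
    assert!(buf.is_empty());
}
```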


@@ -1,118 +1,73 @@
#![allow(deprecated)]
use std::pin::Pin;
use std::task::{Context, Poll};
use std::{fmt, io};
use std::fmt;
use std::io::{self, Read, Write};
use bytes::{Buf, BytesMut};
use futures_core::{ready, Stream};
use futures_sink::Sink;
use bytes::BytesMut;
use futures::{Poll, Sink, StartSend, Stream};
use tokio_codec::{Decoder, Encoder};
use tokio_io::{AsyncRead, AsyncWrite};
use super::framed_read::{framed_read2, framed_read2_with_buffer, FramedRead2};
use super::framed_write::{framed_write2, framed_write2_with_buffer, FramedWrite2};
use crate::{AsyncRead, AsyncWrite, Decoder, Encoder};
/// Low-water mark
const LW: usize = 1024;
/// High-water mark
const HW: usize = 8 * 1024;
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using
/// the `Encoder` and `Decoder` traits to encode and decode frames.
///
/// You can create a `Framed` instance by using the `AsyncRead::framed` adapter.
pub struct Framed<T, U> {
inner: FramedRead2<FramedWrite2<Fuse<T, U>>>,
bitflags::bitflags! {
struct Flags: u8 {
const EOF = 0b0001;
const READABLE = 0b0010;
}
}
pub struct Fuse<T, U>(pub T, pub U);
pin_project_lite::pin_project! {
/// A unified `Stream` and `Sink` interface to an underlying I/O object, using
/// the `Encoder` and `Decoder` traits to encode and decode frames.
///
/// Raw I/O objects work with byte sequences, but higher-level code usually
/// wants to batch these into meaningful chunks, called "frames". This
/// method layers framing on top of an I/O object, by using the `Encoder`/`Decoder`
/// traits to handle encoding and decoding of message frames. Note that
/// the incoming and outgoing frame types may be distinct.
pub struct Framed<T, U> {
#[pin]
io: T,
codec: U,
flags: Flags,
read_buf: BytesMut,
write_buf: BytesMut,
}
}
impl<T, U> Framed<T, U>
where
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder,
U: Decoder,
{
/// Provides a `Stream` and `Sink` interface for reading and writing to this
/// `Io` object, using `Decode` and `Encode` to read and write the raw data.
///
/// Raw I/O objects work with byte sequences, but higher-level code usually
/// wants to batch these into meaningful chunks, called "frames". This
/// method layers framing on top of an I/O object, by using the `Codec`
/// traits to handle encoding and decoding of messages frames. Note that
/// the incoming and outgoing frame types may be distinct.
///
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
///
/// If you want to work more directly with the streams and sink, consider
/// calling `split` on the `Framed` returned by this method, which will
/// break them into separate objects, allowing them to interact more easily.
pub fn new(inner: T, codec: U) -> Framed<T, U> {
pub fn new(io: T, codec: U) -> Framed<T, U> {
Framed {
inner: framed_read2(framed_write2(Fuse(inner, codec), LW, HW)),
io,
codec,
flags: Flags::empty(),
read_buf: BytesMut::with_capacity(HW),
write_buf: BytesMut::with_capacity(HW),
}
}
/// Same as `Framed::new()` with ability to specify write buffer low/high capacity watermarks.
pub fn new_with_caps(inner: T, codec: U, lw: usize, hw: usize) -> Framed<T, U> {
debug_assert!((lw < hw) && hw != 0);
Framed {
inner: framed_read2(framed_write2(Fuse(inner, codec), lw, hw)),
}
}
/// Force send item
pub fn force_send(
&mut self,
item: <U as Encoder>::Item,
) -> Result<(), <U as Encoder>::Error> {
self.inner.get_mut().force_send(item)
}
}
impl<T, U> Framed<T, U> {
/// Provides a `Stream` and `Sink` interface for reading and writing to this
/// `Io` object, using `Decode` and `Encode` to read and write the raw data.
///
/// Raw I/O objects work with byte sequences, but higher-level code usually
/// wants to batch these into meaningful chunks, called "frames". This
/// method layers framing on top of an I/O object, by using the `Codec`
/// traits to handle encoding and decoding of messages frames. Note that
/// the incoming and outgoing frame types may be distinct.
///
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
///
/// This objects takes a stream and a readbuffer and a writebuffer. These
/// field can be obtained from an existing `Framed` with the
/// `into_parts` method.
///
/// If you want to work more directly with the streams and sink, consider
/// calling `split` on the `Framed` returned by this method, which will
/// break them into separate objects, allowing them to interact more easily.
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
Framed {
inner: framed_read2_with_buffer(
framed_write2_with_buffer(
Fuse(parts.io, parts.codec),
parts.write_buf,
parts.write_buf_lw,
parts.write_buf_hw,
),
parts.read_buf,
),
}
}
/// Returns a reference to the underlying codec.
pub fn get_codec(&self) -> &U {
&self.inner.get_ref().get_ref().1
pub fn codec_ref(&self) -> &U {
&self.codec
}
/// Returns a mutable reference to the underlying codec.
pub fn get_codec_mut(&mut self) -> &mut U {
&mut self.inner.get_mut().get_mut().1
pub fn codec_mut(&mut self) -> &mut U {
&mut self.codec
}
/// Returns a reference to the underlying I/O stream wrapped by
@@ -121,81 +76,285 @@ impl<T, U> Framed<T, U> {
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn get_ref(&self) -> &T {
&self.inner.get_ref().get_ref().0
pub fn io_ref(&self) -> &T {
&self.io
}
/// Returns a mutable reference to the underlying I/O stream wrapped by
/// `Frame`.
/// Returns a mutable reference to the underlying I/O stream.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner.get_mut().get_mut().0
pub fn io_mut(&mut self) -> &mut T {
&mut self.io
}
/// Returns a `Pin` of a mutable reference to the underlying I/O stream.
pub fn io_pin(self: Pin<&mut Self>) -> Pin<&mut T> {
self.project().io
}
/// Check if read buffer is empty.
pub fn is_read_buf_empty(&self) -> bool {
self.read_buf.is_empty()
}
/// Check if write buffer is empty.
pub fn is_write_buf_empty(&self) -> bool {
self.inner.get_ref().is_empty()
self.write_buf.is_empty()
}
/// Check if write buffer is full.
pub fn is_write_buf_full(&self) -> bool {
self.inner.get_ref().is_full()
self.write_buf.len() >= HW
}
/// Consumes the `Frame`, returning its underlying I/O stream.
/// Check if framed is able to write more data.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn into_inner(self) -> T {
self.inner.into_inner().into_inner().0
/// The `Framed` object is considered write-ready when there is free space in the write buffer.
pub fn is_write_ready(&self) -> bool {
self.write_buf.len() < HW
}
/// Consume the `Frame`, returning `Frame` with different codec.
pub fn into_framed<U2>(self, codec: U2) -> Framed<T, U2> {
let (inner, read_buf) = self.inner.into_parts();
let (inner, write_buf, lw, hw) = inner.into_parts();
pub fn replace_codec<U2>(self, codec: U2) -> Framed<T, U2> {
Framed {
inner: framed_read2_with_buffer(
framed_write2_with_buffer(Fuse(inner.0, codec), write_buf, lw, hw),
read_buf,
),
codec,
io: self.io,
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
/// Consume the `Frame`, returning `Frame` with different io.
pub fn map_io<F, T2>(self, f: F) -> Framed<T2, U>
pub fn into_map_io<F, T2>(self, f: F) -> Framed<T2, U>
where
F: Fn(T) -> T2,
{
let (inner, read_buf) = self.inner.into_parts();
let (inner, write_buf, lw, hw) = inner.into_parts();
Framed {
inner: framed_read2_with_buffer(
framed_write2_with_buffer(Fuse(f(inner.0), inner.1), write_buf, lw, hw),
read_buf,
),
io: f(self.io),
codec: self.codec,
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
/// Consume the `Frame`, returning `Frame` with different codec.
pub fn map_codec<F, U2>(self, f: F) -> Framed<T, U2>
pub fn into_map_codec<F, U2>(self, f: F) -> Framed<T, U2>
where
F: Fn(U) -> U2,
{
let (inner, read_buf) = self.inner.into_parts();
let (inner, write_buf, lw, hw) = inner.into_parts();
Framed {
inner: framed_read2_with_buffer(
framed_write2_with_buffer(Fuse(inner.0, f(inner.1)), write_buf, lw, hw),
read_buf,
),
io: self.io,
codec: f(self.codec),
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
}
impl<T, U> Framed<T, U> {
/// Serialize an item and write it to the inner write buffer.
pub fn write<I>(mut self: Pin<&mut Self>, item: I) -> Result<(), <U as Encoder<I>>::Error>
where
T: AsyncWrite,
U: Encoder<I>,
{
let this = self.as_mut().project();
let remaining = this.write_buf.capacity() - this.write_buf.len();
if remaining < LW {
this.write_buf.reserve(HW - remaining);
}
this.codec.encode(item, this.write_buf)?;
Ok(())
}
/// Try to read underlying I/O stream and decode item.
pub fn next_item(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Option<Result<<U as Decoder>::Item, U::Error>>>
where
T: AsyncRead,
U: Decoder,
{
loop {
let mut this = self.as_mut().project();
// Repeatedly call `decode` or `decode_eof` as long as it is
// "readable". Readable is defined as not having returned `None`. If
// the upstream has returned EOF, and the decoder is no longer
// readable, it can be assumed that the decoder will never become
// readable again, at which point the stream is terminated.
if this.flags.contains(Flags::READABLE) {
if this.flags.contains(Flags::EOF) {
match this.codec.decode_eof(&mut this.read_buf) {
Ok(Some(frame)) => return Poll::Ready(Some(Ok(frame))),
Ok(None) => return Poll::Ready(None),
Err(e) => return Poll::Ready(Some(Err(e))),
}
}
log::trace!("attempting to decode a frame");
match this.codec.decode(&mut this.read_buf) {
Ok(Some(frame)) => {
log::trace!("frame decoded from buffer");
return Poll::Ready(Some(Ok(frame)));
}
Err(e) => return Poll::Ready(Some(Err(e))),
_ => (), // Need more data
}
this.flags.remove(Flags::READABLE);
}
debug_assert!(!this.flags.contains(Flags::EOF));
// Otherwise, try to read more data and try again. Make sure we've got room
let remaining = this.read_buf.capacity() - this.read_buf.len();
if remaining < LW {
this.read_buf.reserve(HW - remaining)
}
let cnt = match tokio_util::io::poll_read_buf(this.io, cx, this.read_buf) {
Poll::Pending => return Poll::Pending,
Poll::Ready(Err(e)) => return Poll::Ready(Some(Err(e.into()))),
Poll::Ready(Ok(cnt)) => cnt,
};
if cnt == 0 {
this.flags.insert(Flags::EOF);
}
this.flags.insert(Flags::READABLE);
}
}
/// Flush write buffer to underlying I/O stream.
pub fn flush<I>(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), U::Error>>
where
T: AsyncWrite,
U: Encoder<I>,
{
let mut this = self.as_mut().project();
log::trace!("flushing framed transport");
while !this.write_buf.is_empty() {
log::trace!("writing; remaining={}", this.write_buf.len());
let n = ready!(this.io.as_mut().poll_write(cx, this.write_buf))?;
if n == 0 {
return Poll::Ready(Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to write frame to transport",
)
.into()));
}
// remove written data
this.write_buf.advance(n);
}
// Try flushing the underlying IO
ready!(this.io.poll_flush(cx))?;
log::trace!("framed transport flushed");
Poll::Ready(Ok(()))
}
/// Flush write buffer and shutdown underlying I/O stream.
pub fn close<I>(
mut self: Pin<&mut Self>,
cx: &mut Context<'_>,
) -> Poll<Result<(), U::Error>>
where
T: AsyncWrite,
U: Encoder<I>,
{
let mut this = self.as_mut().project();
ready!(this.io.as_mut().poll_flush(cx))?;
ready!(this.io.as_mut().poll_shutdown(cx))?;
Poll::Ready(Ok(()))
}
}
impl<T, U> Stream for Framed<T, U>
where
T: AsyncRead,
U: Decoder,
{
type Item = Result<U::Item, U::Error>;
fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
self.next_item(cx)
}
}
impl<T, U, I> Sink<I> for Framed<T, U>
where
T: AsyncWrite,
U: Encoder<I>,
U::Error: From<io::Error>,
{
type Error = U::Error;
fn poll_ready(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
if self.is_write_ready() {
Poll::Ready(Ok(()))
} else {
Poll::Pending
}
}
fn start_send(self: Pin<&mut Self>, item: I) -> Result<(), Self::Error> {
self.write(item)
}
fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.flush(cx)
}
fn poll_close(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.close(cx)
}
}
impl<T, U> fmt::Debug for Framed<T, U>
where
T: fmt::Debug,
U: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
f.debug_struct("Framed")
.field("io", &self.io)
.field("codec", &self.codec)
.finish()
}
}
impl<T, U> Framed<T, U> {
/// This function returns a *single* object that is both `Stream` and
/// `Sink`; grouping this into a single object is often useful for layering
/// things like gzip or TLS, which require both read and write access to the
/// underlying object.
///
/// These objects take a stream, a read buffer and a write buffer. These
/// fields can be obtained from an existing `Framed` with the `into_parts` method.
pub fn from_parts(parts: FramedParts<T, U>) -> Framed<T, U> {
Framed {
io: parts.io,
codec: parts.codec,
flags: parts.flags,
write_buf: parts.write_buf,
read_buf: parts.read_buf,
}
}
@@ -206,124 +365,16 @@ impl<T, U> Framed<T, U> {
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn into_parts(self) -> FramedParts<T, U> {
let (inner, read_buf) = self.inner.into_parts();
let (inner, write_buf, write_buf_lw, write_buf_hw) = inner.into_parts();
FramedParts {
io: inner.0,
codec: inner.1,
read_buf,
write_buf,
write_buf_lw,
write_buf_hw,
_priv: (),
io: self.io,
codec: self.codec,
flags: self.flags,
read_buf: self.read_buf,
write_buf: self.write_buf,
}
}
}
impl<T, U> Stream for Framed<T, U>
where
T: AsyncRead,
U: Decoder,
{
type Item = U::Item;
type Error = U::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.inner.poll()
}
}
impl<T, U> Sink for Framed<T, U>
where
T: AsyncWrite,
U: Encoder,
U::Error: From<io::Error>,
{
type SinkItem = U::Item;
type SinkError = U::Error;
fn start_send(
&mut self,
item: Self::SinkItem,
) -> StartSend<Self::SinkItem, Self::SinkError> {
self.inner.get_mut().start_send(item)
}
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
self.inner.get_mut().poll_complete()
}
fn close(&mut self) -> Poll<(), Self::SinkError> {
self.inner.get_mut().close()
}
}
impl<T, U> fmt::Debug for Framed<T, U>
where
T: fmt::Debug,
U: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("Framed")
.field("io", &self.inner.get_ref().get_ref().0)
.field("codec", &self.inner.get_ref().get_ref().1)
.finish()
}
}
// ===== impl Fuse =====
impl<T: Read, U> Read for Fuse<T, U> {
fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
self.0.read(dst)
}
}
impl<T: AsyncRead, U> AsyncRead for Fuse<T, U> {
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
self.0.prepare_uninitialized_buffer(buf)
}
}
impl<T: Write, U> Write for Fuse<T, U> {
fn write(&mut self, src: &[u8]) -> io::Result<usize> {
self.0.write(src)
}
fn flush(&mut self) -> io::Result<()> {
self.0.flush()
}
}
impl<T: AsyncWrite, U> AsyncWrite for Fuse<T, U> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
self.0.shutdown()
}
}
impl<T, U: Decoder> Decoder for Fuse<T, U> {
type Item = U::Item;
type Error = U::Error;
fn decode(&mut self, buffer: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
self.1.decode(buffer)
}
fn decode_eof(&mut self, buffer: &mut BytesMut) -> Result<Option<Self::Item>, Self::Error> {
self.1.decode_eof(buffer)
}
}
impl<T, U: Encoder> Encoder for Fuse<T, U> {
type Item = U::Item;
type Error = U::Error;
fn encode(&mut self, item: Self::Item, dst: &mut BytesMut) -> Result<(), Self::Error> {
self.1.encode(item, dst)
}
}
/// `FramedParts` contains an export of the data of a Framed transport.
/// It can be used to construct a new `Framed` with a different codec.
/// It contains all current buffers and the inner transport.
@@ -341,15 +392,7 @@ pub struct FramedParts<T, U> {
/// A buffer with unprocessed data which are not written yet.
pub write_buf: BytesMut,
/// A buffer low watermark capacity
pub write_buf_lw: usize,
/// A buffer high watermark capacity
pub write_buf_hw: usize,
/// This private field allows us to add additional fields in the future in a
/// backwards compatible way.
_priv: (),
flags: Flags,
}
impl<T, U> FramedParts<T, U> {
@@ -358,11 +401,9 @@ impl<T, U> FramedParts<T, U> {
FramedParts {
io,
codec,
flags: Flags::empty(),
read_buf: BytesMut::new(),
write_buf: BytesMut::new(),
write_buf_lw: LW,
write_buf_hw: HW,
_priv: (),
}
}
@@ -372,10 +413,8 @@ impl<T, U> FramedParts<T, U> {
io,
codec,
read_buf,
flags: Flags::empty(),
write_buf: BytesMut::new(),
write_buf_lw: LW,
write_buf_hw: HW,
_priv: (),
}
}
}
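For context, a minimal sketch of driving the rewritten `Framed` through its `Stream` and `Sink` implementations, assuming actix-rt 2.0 for the TCP stream and the `futures` crate for the `StreamExt`/`SinkExt` combinators; the echo logic is illustrative only:
```rust
use actix_codec::{BytesCodec, Framed};
use actix_rt::net::TcpStream;
use futures::{SinkExt, StreamExt};

async fn echo_one_frame(stream: TcpStream) -> std::io::Result<()> {
    // wrap the raw I/O object in a codec
    let mut framed = Framed::new(stream, BytesCodec);

    // `Framed` is a `Stream` of decoded frames...
    if let Some(chunk) = framed.next().await.transpose()? {
        // ...and a `Sink` for encoded frames
        framed.send(chunk.freeze()).await?;
    }

    Ok(())
}
```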


@@ -1,218 +0,0 @@
use std::fmt;
use bytes::BytesMut;
use futures::{try_ready, Async, Poll, Sink, StartSend, Stream};
use log::trace;
use tokio_codec::Decoder;
use tokio_io::AsyncRead;
use super::framed::Fuse;
/// A `Stream` of messages decoded from an `AsyncRead`.
pub struct FramedRead<T, D> {
inner: FramedRead2<Fuse<T, D>>,
}
pub struct FramedRead2<T> {
inner: T,
eof: bool,
is_readable: bool,
buffer: BytesMut,
}
const INITIAL_CAPACITY: usize = 8 * 1024;
// ===== impl FramedRead =====
impl<T, D> FramedRead<T, D>
where
T: AsyncRead,
D: Decoder,
{
/// Creates a new `FramedRead` with the given `decoder`.
pub fn new(inner: T, decoder: D) -> FramedRead<T, D> {
FramedRead {
inner: framed_read2(Fuse(inner, decoder)),
}
}
}
impl<T, D> FramedRead<T, D> {
/// Returns a reference to the underlying I/O stream wrapped by
/// `FramedRead`.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn get_ref(&self) -> &T {
&self.inner.inner.0
}
/// Returns a mutable reference to the underlying I/O stream wrapped by
/// `FramedRead`.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner.inner.0
}
/// Consumes the `FramedRead`, returning its underlying I/O stream.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn into_inner(self) -> T {
self.inner.inner.0
}
/// Returns a reference to the underlying decoder.
pub fn decoder(&self) -> &D {
&self.inner.inner.1
}
/// Returns a mutable reference to the underlying decoder.
pub fn decoder_mut(&mut self) -> &mut D {
&mut self.inner.inner.1
}
}
impl<T, D> Stream for FramedRead<T, D>
where
T: AsyncRead,
D: Decoder,
{
type Item = D::Item;
type Error = D::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.inner.poll()
}
}
impl<T, D> Sink for FramedRead<T, D>
where
T: Sink,
{
type SinkItem = T::SinkItem;
type SinkError = T::SinkError;
fn start_send(
&mut self,
item: Self::SinkItem,
) -> StartSend<Self::SinkItem, Self::SinkError> {
self.inner.inner.0.start_send(item)
}
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
self.inner.inner.0.poll_complete()
}
fn close(&mut self) -> Poll<(), Self::SinkError> {
self.inner.inner.0.close()
}
}
impl<T, D> fmt::Debug for FramedRead<T, D>
where
T: fmt::Debug,
D: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("FramedRead")
.field("inner", &self.inner.inner.0)
.field("decoder", &self.inner.inner.1)
.field("eof", &self.inner.eof)
.field("is_readable", &self.inner.is_readable)
.field("buffer", &self.inner.buffer)
.finish()
}
}
// ===== impl FramedRead2 =====
pub fn framed_read2<T>(inner: T) -> FramedRead2<T> {
FramedRead2 {
inner,
eof: false,
is_readable: false,
buffer: BytesMut::with_capacity(INITIAL_CAPACITY),
}
}
pub fn framed_read2_with_buffer<T>(inner: T, mut buf: BytesMut) -> FramedRead2<T> {
if buf.capacity() < INITIAL_CAPACITY {
let bytes_to_reserve = INITIAL_CAPACITY - buf.capacity();
buf.reserve(bytes_to_reserve);
}
FramedRead2 {
inner,
eof: false,
is_readable: !buf.is_empty(),
buffer: buf,
}
}
impl<T> FramedRead2<T> {
pub fn get_ref(&self) -> &T {
&self.inner
}
pub fn into_inner(self) -> T {
self.inner
}
pub fn into_parts(self) -> (T, BytesMut) {
(self.inner, self.buffer)
}
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
}
impl<T> Stream for FramedRead2<T>
where
T: AsyncRead + Decoder,
{
type Item = T::Item;
type Error = T::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
loop {
// Repeatedly call `decode` or `decode_eof` as long as it is
// "readable". Readable is defined as not having returned `None`. If
// the upstream has returned EOF, and the decoder is no longer
// readable, it can be assumed that the decoder will never become
// readable again, at which point the stream is terminated.
if self.is_readable {
if self.eof {
let frame = self.inner.decode_eof(&mut self.buffer)?;
return Ok(Async::Ready(frame));
}
trace!("attempting to decode a frame");
if let Some(frame) = self.inner.decode(&mut self.buffer)? {
trace!("frame decoded from buffer");
return Ok(Async::Ready(Some(frame)));
}
self.is_readable = false;
}
assert!(!self.eof);
// Otherwise, try to read more data and try again. Make sure we've
// got room for at least one byte to read to ensure that we don't
// get a spurious 0 that looks like EOF
self.buffer.reserve(1);
if 0 == try_ready!(self.inner.read_buf(&mut self.buffer)) {
self.eof = true;
}
self.is_readable = true;
}
}
}


@@ -1,303 +0,0 @@
use std::fmt;
use std::io::{self, Read};
use bytes::BytesMut;
use futures::{try_ready, Async, AsyncSink, Poll, Sink, StartSend, Stream};
use log::trace;
use tokio_codec::{Decoder, Encoder};
use tokio_io::{AsyncRead, AsyncWrite};
use super::framed::Fuse;
/// A `Sink` of frames encoded to an `AsyncWrite`.
pub struct FramedWrite<T, E> {
inner: FramedWrite2<Fuse<T, E>>,
}
pub struct FramedWrite2<T> {
inner: T,
buffer: BytesMut,
low_watermark: usize,
high_watermark: usize,
}
impl<T, E> FramedWrite<T, E>
where
T: AsyncWrite,
E: Encoder,
{
/// Creates a new `FramedWrite` with the given `encoder`.
pub fn new(inner: T, encoder: E, lw: usize, hw: usize) -> FramedWrite<T, E> {
FramedWrite {
inner: framed_write2(Fuse(inner, encoder), lw, hw),
}
}
}
impl<T, E> FramedWrite<T, E> {
/// Returns a reference to the underlying I/O stream wrapped by
/// `FramedWrite`.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn get_ref(&self) -> &T {
&self.inner.inner.0
}
/// Returns a mutable reference to the underlying I/O stream wrapped by
/// `FramedWrite`.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner.inner.0
}
/// Consumes the `FramedWrite`, returning its underlying I/O stream.
///
/// Note that care should be taken to not tamper with the underlying stream
/// of data coming in as it may corrupt the stream of frames otherwise
/// being worked with.
pub fn into_inner(self) -> T {
self.inner.inner.0
}
/// Returns a reference to the underlying decoder.
pub fn encoder(&self) -> &E {
&self.inner.inner.1
}
/// Returns a mutable reference to the underlying decoder.
pub fn encoder_mut(&mut self) -> &mut E {
&mut self.inner.inner.1
}
/// Check if write buffer is full
pub fn is_full(&self) -> bool {
self.inner.is_full()
}
/// Check if write buffer is empty.
pub fn is_empty(&self) -> bool {
self.inner.is_empty()
}
}
impl<T, E> FramedWrite<T, E>
where
E: Encoder,
{
/// Force send item
pub fn force_send(&mut self, item: E::Item) -> Result<(), E::Error> {
self.inner.force_send(item)
}
}
impl<T, E> Sink for FramedWrite<T, E>
where
T: AsyncWrite,
E: Encoder,
{
type SinkItem = E::Item;
type SinkError = E::Error;
fn start_send(&mut self, item: E::Item) -> StartSend<E::Item, E::Error> {
self.inner.start_send(item)
}
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
self.inner.poll_complete()
}
fn close(&mut self) -> Poll<(), Self::SinkError> {
Ok(self.inner.close()?)
}
}
impl<T, D> Stream for FramedWrite<T, D>
where
T: Stream,
{
type Item = T::Item;
type Error = T::Error;
fn poll(&mut self) -> Poll<Option<Self::Item>, Self::Error> {
self.inner.inner.0.poll()
}
}
impl<T, U> fmt::Debug for FramedWrite<T, U>
where
T: fmt::Debug,
U: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_struct("FramedWrite")
.field("inner", &self.inner.get_ref().0)
.field("encoder", &self.inner.get_ref().1)
.field("buffer", &self.inner.buffer)
.finish()
}
}
// ===== impl FramedWrite2 =====
pub fn framed_write2<T>(
inner: T,
low_watermark: usize,
high_watermark: usize,
) -> FramedWrite2<T> {
FramedWrite2 {
inner,
low_watermark,
high_watermark,
buffer: BytesMut::with_capacity(high_watermark),
}
}
pub fn framed_write2_with_buffer<T>(
inner: T,
mut buffer: BytesMut,
low_watermark: usize,
high_watermark: usize,
) -> FramedWrite2<T> {
if buffer.capacity() < high_watermark {
let bytes_to_reserve = high_watermark - buffer.capacity();
buffer.reserve(bytes_to_reserve);
}
FramedWrite2 {
inner,
buffer,
low_watermark,
high_watermark,
}
}
impl<T> FramedWrite2<T> {
pub fn get_ref(&self) -> &T {
&self.inner
}
pub fn into_inner(self) -> T {
self.inner
}
pub fn into_parts(self) -> (T, BytesMut, usize, usize) {
(
self.inner,
self.buffer,
self.low_watermark,
self.high_watermark,
)
}
pub fn get_mut(&mut self) -> &mut T {
&mut self.inner
}
pub fn is_full(&self) -> bool {
self.buffer.len() >= self.high_watermark
}
pub fn is_empty(&self) -> bool {
self.buffer.is_empty()
}
}
impl<T> FramedWrite2<T>
where
T: Encoder,
{
pub fn force_send(&mut self, item: T::Item) -> Result<(), T::Error> {
let len = self.buffer.len();
if len < self.low_watermark {
self.buffer.reserve(self.high_watermark - len)
}
self.inner.encode(item, &mut self.buffer)?;
Ok(())
}
}
impl<T> Sink for FramedWrite2<T>
where
T: AsyncWrite + Encoder,
{
type SinkItem = T::Item;
type SinkError = T::Error;
fn start_send(&mut self, item: T::Item) -> StartSend<T::Item, T::Error> {
// Check the buffer capacity
let len = self.buffer.len();
if len >= self.high_watermark {
return Ok(AsyncSink::NotReady(item));
}
if len < self.low_watermark {
self.buffer.reserve(self.high_watermark - len)
}
self.inner.encode(item, &mut self.buffer)?;
Ok(AsyncSink::Ready)
}
fn poll_complete(&mut self) -> Poll<(), Self::SinkError> {
trace!("flushing framed transport");
while !self.buffer.is_empty() {
trace!("writing; remaining={}", self.buffer.len());
let n = try_ready!(self.inner.poll_write(&self.buffer));
if n == 0 {
return Err(io::Error::new(
io::ErrorKind::WriteZero,
"failed to \
write frame to transport",
)
.into());
}
// TODO: Add a way to `bytes` to do this w/o returning the drained
// data.
let _ = self.buffer.split_to(n);
}
// Try flushing the underlying IO
try_ready!(self.inner.poll_flush());
trace!("framed transport flushed");
Ok(Async::Ready(()))
}
fn close(&mut self) -> Poll<(), Self::SinkError> {
try_ready!(self.poll_complete());
Ok(self.inner.shutdown()?)
}
}
impl<T: Decoder> Decoder for FramedWrite2<T> {
type Item = T::Item;
type Error = T::Error;
fn decode(&mut self, src: &mut BytesMut) -> Result<Option<T::Item>, T::Error> {
self.inner.decode(src)
}
fn decode_eof(&mut self, src: &mut BytesMut) -> Result<Option<T::Item>, T::Error> {
self.inner.decode_eof(src)
}
}
impl<T: Read> Read for FramedWrite2<T> {
fn read(&mut self, dst: &mut [u8]) -> io::Result<usize> {
self.inner.read(dst)
}
}
impl<T: AsyncRead> AsyncRead for FramedWrite2<T> {
unsafe fn prepare_uninitialized_buffer(&self, buf: &mut [u8]) -> bool {
self.inner.prepare_uninitialized_buffer(buf)
}
}


@@ -1,24 +1,23 @@
//! Utilities for encoding and decoding frames.
//! Codec utilities for working with framed protocols.
//!
//! Contains adapters to go from streams of bytes, [`AsyncRead`] and
//! [`AsyncWrite`], to framed streams implementing [`Sink`] and [`Stream`].
//! Framed streams are also known as [transports].
//! Framed streams are also known as `transports`.
//!
//! [`AsyncRead`]: #
//! [`AsyncWrite`]: #
//! [`Sink`]: #
//! [`Stream`]: #
//! [transports]: #
//! [`Sink`]: futures_sink::Sink
//! [`Stream`]: futures_core::Stream
#![deny(rust_2018_idioms, nonstandard_style)]
#![warn(missing_docs)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod bcodec;
mod framed;
mod framed_read;
mod framed_write;
pub use self::bcodec::BytesCodec;
pub use self::framed::{Framed, FramedParts};
pub use self::framed_read::FramedRead;
pub use self::framed_write::FramedWrite;
pub use tokio_codec::{Decoder, Encoder};
pub use tokio_io::{AsyncRead, AsyncWrite};
pub use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
pub use tokio_util::codec::{Decoder, Encoder};
pub use tokio_util::io::poll_read_buf;
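Because the crate now re-exports the tokio-util 0.6 `Decoder` and `Encoder` traits, custom codecs are written directly against these re-exports. A rough sketch of a hypothetical newline-delimited codec follows; the `LineCodec` name and framing rules are illustrative and not part of the crate:
```rust
use std::io;

use actix_codec::{Decoder, Encoder};
use bytes::BytesMut;

/// Hypothetical codec that frames messages on newline boundaries.
struct LineCodec;

impl Decoder for LineCodec {
    type Item = String;
    type Error = io::Error;

    fn decode(&mut self, src: &mut BytesMut) -> Result<Option<String>, io::Error> {
        // look for a complete line; leave partial data in the buffer
        match src.iter().position(|&b| b == b'\n') {
            Some(pos) => {
                let line = src.split_to(pos + 1);
                Ok(Some(String::from_utf8_lossy(&line[..pos]).into_owned()))
            }
            None => Ok(None),
        }
    }
}

impl Encoder<String> for LineCodec {
    type Error = io::Error;

    fn encode(&mut self, item: String, dst: &mut BytesMut) -> Result<(), io::Error> {
        dst.extend_from_slice(item.as_bytes());
        dst.extend_from_slice(b"\n");
        Ok(())
    }
}
```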


@@ -1,79 +0,0 @@
# Changes
## [0.2.4] - 2019-09-02
* Use arbiter's storage for default async resolver
## [0.2.3] - 2019-08-05
* Add `ConnectService` and `OpensslConnectService`
## [0.2.2] - 2019-07-24
* Add `rustls` support
## [0.2.1] - 2019-07-17
### Added
* Expose Connect addrs #30
### Changed
* Update `derive_more` to 0.15
## [0.2.0] - 2019-05-12
### Changed
* Upgrade to actix-service 0.4
## [0.1.5] - 2019-04-19
### Added
* `Connect::set_addr()`
### Changed
* Use trust-dns-resolver 0.11.0
## [0.1.4] - 2019-04-12
### Changed
* Do not start default resolver immediately for default connector.
## [0.1.3] - 2019-04-11
### Changed
* Start trust-dns default resolver on first use
## [0.1.2] - 2019-04-04
### Added
* Log error if dns system config could not be loaded.
### Changed
* Rename connect Connector to TcpConnector #10
## [0.1.1] - 2019-03-15
### Fixed
* Fix error handling for single address
## [0.1.0] - 2019-03-14
* Refactor resolver and connector services
* Rename crate


@@ -1,61 +0,0 @@
[package]
name = "actix-connect"
version = "0.2.4"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix Connector - tcp connector service"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-connect/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
edition = "2018"
workspace = ".."
[package.metadata.docs.rs]
features = ["ssl", "uri"]
[lib]
name = "actix_connect"
path = "src/lib.rs"
[features]
default = ["uri"]
# openssl
ssl = ["openssl", "tokio-openssl"]
#rustls
rust-tls = ["rustls", "tokio-rustls", "webpki"]
# support http::Uri as connect address
uri = ["http"]
[dependencies]
actix-service = "0.4.0"
actix-codec = "0.1.2"
actix-utils = "0.4.0"
actix-rt = "0.2.5"
derive_more = "0.15"
either = "1.5.2"
futures = "0.1.25"
http = { version = "0.1.17", optional = true }
log = "0.4"
tokio-tcp = "0.1.3"
tokio-current-thread = "0.1.5"
trust-dns-resolver = { version="0.11.0", default-features = false }
# openssl
openssl = { version="0.10", optional = true }
tokio-openssl = { version="0.3", optional = true }
#rustls
rustls = { version = "0.15.2", optional = true }
tokio-rustls = { version = "0.9.1", optional = true }
webpki = { version = "0.19", optional = true }
[dev-dependencies]
bytes = "0.4"
actix-test-server = { version="0.2.2", features=["ssl"] }
actix-server-config = "0.1.0"


@@ -1,168 +0,0 @@
use std::collections::VecDeque;
use std::marker::PhantomData;
use std::net::SocketAddr;
use actix_service::{NewService, Service};
use futures::future::{err, ok, Either, FutureResult};
use futures::{Async, Future, Poll};
use tokio_tcp::{ConnectFuture, TcpStream};
use super::connect::{Address, Connect, Connection};
use super::error::ConnectError;
/// Tcp connector service factory
#[derive(Debug)]
pub struct TcpConnectorFactory<T>(PhantomData<T>);
impl<T> TcpConnectorFactory<T> {
pub fn new() -> Self {
TcpConnectorFactory(PhantomData)
}
/// Create tcp connector service
pub fn service(&self) -> TcpConnector<T> {
TcpConnector(PhantomData)
}
}
impl<T> Default for TcpConnectorFactory<T> {
fn default() -> Self {
TcpConnectorFactory(PhantomData)
}
}
impl<T> Clone for TcpConnectorFactory<T> {
fn clone(&self) -> Self {
TcpConnectorFactory(PhantomData)
}
}
impl<T: Address> NewService for TcpConnectorFactory<T> {
type Request = Connect<T>;
type Response = Connection<T, TcpStream>;
type Error = ConnectError;
type Config = ();
type Service = TcpConnector<T>;
type InitError = ();
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, _: &()) -> Self::Future {
ok(self.service())
}
}
/// Tcp connector service
#[derive(Default, Debug)]
pub struct TcpConnector<T>(PhantomData<T>);
impl<T> TcpConnector<T> {
pub fn new() -> Self {
TcpConnector(PhantomData)
}
}
impl<T> Clone for TcpConnector<T> {
fn clone(&self) -> Self {
TcpConnector(PhantomData)
}
}
impl<T: Address> Service for TcpConnector<T> {
type Request = Connect<T>;
type Response = Connection<T, TcpStream>;
type Error = ConnectError;
type Future = Either<TcpConnectorResponse<T>, FutureResult<Self::Response, Self::Error>>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: Connect<T>) -> Self::Future {
let port = req.port();
let Connect { req, addr, .. } = req;
if let Some(addr) = addr {
Either::A(TcpConnectorResponse::new(req, port, addr))
} else {
error!("TCP connector: got unresolved address");
Either::B(err(ConnectError::Unresolverd))
}
}
}
#[doc(hidden)]
/// Tcp stream connector response future
pub struct TcpConnectorResponse<T> {
req: Option<T>,
port: u16,
addrs: Option<VecDeque<SocketAddr>>,
stream: Option<ConnectFuture>,
}
impl<T: Address> TcpConnectorResponse<T> {
pub fn new(
req: T,
port: u16,
addr: either::Either<SocketAddr, VecDeque<SocketAddr>>,
) -> TcpConnectorResponse<T> {
trace!(
"TCP connector - connecting to {:?} port:{}",
req.host(),
port
);
match addr {
either::Either::Left(addr) => TcpConnectorResponse {
req: Some(req),
port,
addrs: None,
stream: Some(TcpStream::connect(&addr)),
},
either::Either::Right(addrs) => TcpConnectorResponse {
req: Some(req),
port,
addrs: Some(addrs),
stream: None,
},
}
}
}
impl<T: Address> Future for TcpConnectorResponse<T> {
type Item = Connection<T, TcpStream>;
type Error = ConnectError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
// connect
loop {
if let Some(new) = self.stream.as_mut() {
match new.poll() {
Ok(Async::Ready(sock)) => {
let req = self.req.take().unwrap();
trace!(
"TCP connector - successfully connected to connecting to {:?} - {:?}",
req.host(), sock.peer_addr()
);
return Ok(Async::Ready(Connection::new(sock, req)));
}
Ok(Async::NotReady) => return Ok(Async::NotReady),
Err(err) => {
trace!(
"TCP connector - failed to connect to connecting to {:?} port: {}",
self.req.as_ref().unwrap().host(),
self.port,
);
if self.addrs.is_none() || self.addrs.as_ref().unwrap().is_empty() {
return Err(err.into());
}
}
}
}
// try to connect
self.stream = Some(TcpStream::connect(
&self.addrs.as_mut().unwrap().pop_front().unwrap(),
));
}
}
}


@@ -1,106 +0,0 @@
//! Actix connect - tcp connector service
//!
//! ## Package feature
//!
//! * `ssl` - enables ssl support via `openssl` crate
//! * `rust-tls` - enables ssl support via `rustls` crate
#![recursion_limit = "128"]
#[macro_use]
extern crate log;
mod connect;
mod connector;
mod error;
mod resolver;
mod service;
pub mod ssl;
#[cfg(feature = "uri")]
mod uri;
pub use trust_dns_resolver::config::{ResolverConfig, ResolverOpts};
pub use trust_dns_resolver::system_conf::read_system_conf;
pub use trust_dns_resolver::{error::ResolveError, AsyncResolver};
pub use self::connect::{Address, Connect, Connection};
pub use self::connector::{TcpConnector, TcpConnectorFactory};
pub use self::error::ConnectError;
pub use self::resolver::{Resolver, ResolverFactory};
pub use self::service::{ConnectService, ConnectServiceFactory};
use actix_rt::Arbiter;
use actix_service::{NewService, Service, ServiceExt};
use tokio_tcp::TcpStream;
pub fn start_resolver(cfg: ResolverConfig, opts: ResolverOpts) -> AsyncResolver {
let (resolver, bg) = AsyncResolver::new(cfg, opts);
tokio_current_thread::spawn(bg);
resolver
}
struct DefaultResolver(AsyncResolver);
pub(crate) fn get_default_resolver() -> AsyncResolver {
if Arbiter::contains_item::<DefaultResolver>() {
return Arbiter::get_item(|item: &DefaultResolver| item.0.clone());
} else {
let (cfg, opts) = match read_system_conf() {
Ok((cfg, opts)) => (cfg, opts),
Err(e) => {
log::error!("TRust-DNS can not load system config: {}", e);
(ResolverConfig::default(), ResolverOpts::default())
}
};
let (resolver, bg) = AsyncResolver::new(cfg, opts);
tokio_current_thread::spawn(bg);
Arbiter::set_item(DefaultResolver(resolver.clone()));
resolver
}
}
pub fn start_default_resolver() -> AsyncResolver {
get_default_resolver()
}
/// Create tcp connector service
pub fn new_connector<T: Address>(
resolver: AsyncResolver,
) -> impl Service<Request = Connect<T>, Response = Connection<T, TcpStream>, Error = ConnectError>
+ Clone {
Resolver::new(resolver).and_then(TcpConnector::new())
}
/// Create tcp connector service
pub fn new_connector_factory<T: Address>(
resolver: AsyncResolver,
) -> impl NewService<
Config = (),
Request = Connect<T>,
Response = Connection<T, TcpStream>,
Error = ConnectError,
InitError = (),
> + Clone {
ResolverFactory::new(resolver).and_then(TcpConnectorFactory::new())
}
/// Create connector service with default parameters
pub fn default_connector<T: Address>(
) -> impl Service<Request = Connect<T>, Response = Connection<T, TcpStream>, Error = ConnectError>
+ Clone {
Resolver::default().and_then(TcpConnector::new())
}
/// Create connector service factory with default parameters
pub fn default_connector_factory<T: Address>() -> impl NewService<
Config = (),
Request = Connect<T>,
Response = Connection<T, TcpStream>,
Error = ConnectError,
InitError = (),
> + Clone {
ResolverFactory::default().and_then(TcpConnectorFactory::new())
}


@@ -1,123 +0,0 @@
use actix_service::{NewService, Service};
use futures::future::{ok, FutureResult};
use futures::{try_ready, Async, Future, Poll};
use tokio_tcp::TcpStream;
use trust_dns_resolver::AsyncResolver;
use crate::connect::{Address, Connect, Connection};
use crate::connector::{TcpConnector, TcpConnectorFactory};
use crate::error::ConnectError;
use crate::resolver::{Resolver, ResolverFactory};
pub struct ConnectServiceFactory<T> {
tcp: TcpConnectorFactory<T>,
resolver: ResolverFactory<T>,
}
impl<T> ConnectServiceFactory<T> {
/// Construct new ConnectService factory
pub fn new() -> Self {
ConnectServiceFactory {
tcp: TcpConnectorFactory::default(),
resolver: ResolverFactory::default(),
}
}
/// Construct new connect service with custom dns resolver
pub fn with_resolver(resolver: AsyncResolver) -> Self {
ConnectServiceFactory {
tcp: TcpConnectorFactory::default(),
resolver: ResolverFactory::new(resolver),
}
}
/// Construct new service
pub fn service(&self) -> ConnectService<T> {
ConnectService {
tcp: self.tcp.service(),
resolver: self.resolver.service(),
}
}
}
impl<T> Default for ConnectServiceFactory<T> {
fn default() -> Self {
ConnectServiceFactory {
tcp: TcpConnectorFactory::default(),
resolver: ResolverFactory::default(),
}
}
}
impl<T> Clone for ConnectServiceFactory<T> {
fn clone(&self) -> Self {
ConnectServiceFactory {
tcp: self.tcp.clone(),
resolver: self.resolver.clone(),
}
}
}
impl<T: Address> NewService for ConnectServiceFactory<T> {
type Request = Connect<T>;
type Response = Connection<T, TcpStream>;
type Error = ConnectError;
type Config = ();
type Service = ConnectService<T>;
type InitError = ();
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, _: &()) -> Self::Future {
ok(self.service())
}
}
#[derive(Clone)]
pub struct ConnectService<T> {
tcp: TcpConnector<T>,
resolver: Resolver<T>,
}
impl<T: Address> Service for ConnectService<T> {
type Request = Connect<T>;
type Response = Connection<T, TcpStream>;
type Error = ConnectError;
type Future = ConnectServiceResponse<T>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: Connect<T>) -> Self::Future {
ConnectServiceResponse {
fut1: Some(self.resolver.call(req)),
fut2: None,
tcp: self.tcp.clone(),
}
}
}
pub struct ConnectServiceResponse<T: Address> {
fut1: Option<<Resolver<T> as Service>::Future>,
fut2: Option<<TcpConnector<T> as Service>::Future>,
tcp: TcpConnector<T>,
}
impl<T: Address> Future for ConnectServiceResponse<T> {
type Item = Connection<T, TcpStream>;
type Error = ConnectError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(ref mut fut) = self.fut1 {
let res = try_ready!(fut.poll());
let _ = self.fut1.take();
self.fut2 = Some(self.tcp.call(res));
}
if let Some(ref mut fut) = self.fut2 {
return fut.poll();
}
Ok(Async::NotReady)
}
}


@@ -1,12 +0,0 @@
//! SSL Services
#[cfg(feature = "ssl")]
mod openssl;
#[cfg(feature = "ssl")]
pub use self::openssl::{
OpensslConnectService, OpensslConnectServiceFactory, OpensslConnector,
};
#[cfg(feature = "rust-tls")]
mod rustls;
#[cfg(feature = "rust-tls")]
pub use self::rustls::RustlsConnector;


@@ -1,251 +0,0 @@
use std::marker::PhantomData;
use std::{fmt, io};
use actix_codec::{AsyncRead, AsyncWrite};
use actix_service::{NewService, Service};
use futures::{future::ok, future::FutureResult, try_ready, Async, Future, Poll};
use openssl::ssl::{HandshakeError, SslConnector};
use tokio_openssl::{ConnectAsync, SslConnectorExt, SslStream};
use tokio_tcp::TcpStream;
use trust_dns_resolver::AsyncResolver;
use crate::{
Address, Connect, ConnectError, ConnectService, ConnectServiceFactory, Connection,
};
/// Openssl connector factory
pub struct OpensslConnector<T, U> {
connector: SslConnector,
_t: PhantomData<(T, U)>,
}
impl<T, U> OpensslConnector<T, U> {
pub fn new(connector: SslConnector) -> Self {
OpensslConnector {
connector,
_t: PhantomData,
}
}
}
impl<T, U> OpensslConnector<T, U>
where
T: Address,
U: AsyncRead + AsyncWrite + fmt::Debug,
{
pub fn service(
connector: SslConnector,
) -> impl Service<
Request = Connection<T, U>,
Response = Connection<T, SslStream<U>>,
Error = HandshakeError<U>,
> {
OpensslConnectorService {
connector: connector,
_t: PhantomData,
}
}
}
impl<T, U> Clone for OpensslConnector<T, U> {
fn clone(&self) -> Self {
Self {
connector: self.connector.clone(),
_t: PhantomData,
}
}
}
impl<T: Address, U> NewService for OpensslConnector<T, U>
where
U: AsyncRead + AsyncWrite + fmt::Debug,
{
type Request = Connection<T, U>;
type Response = Connection<T, SslStream<U>>;
type Error = HandshakeError<U>;
type Config = ();
type Service = OpensslConnectorService<T, U>;
type InitError = ();
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, _: &()) -> Self::Future {
ok(OpensslConnectorService {
connector: self.connector.clone(),
_t: PhantomData,
})
}
}
pub struct OpensslConnectorService<T, U> {
connector: SslConnector,
_t: PhantomData<(T, U)>,
}
impl<T, U> Clone for OpensslConnectorService<T, U> {
fn clone(&self) -> Self {
Self {
connector: self.connector.clone(),
_t: PhantomData,
}
}
}
impl<T: Address, U> Service for OpensslConnectorService<T, U>
where
U: AsyncRead + AsyncWrite + fmt::Debug,
{
type Request = Connection<T, U>;
type Response = Connection<T, SslStream<U>>;
type Error = HandshakeError<U>;
type Future = ConnectAsyncExt<T, U>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, stream: Connection<T, U>) -> Self::Future {
trace!("SSL Handshake start for: {:?}", stream.host());
let (io, stream) = stream.replace(());
ConnectAsyncExt {
fut: SslConnectorExt::connect_async(&self.connector, stream.host(), io),
stream: Some(stream),
}
}
}
pub struct ConnectAsyncExt<T, U> {
fut: ConnectAsync<U>,
stream: Option<Connection<T, ()>>,
}
impl<T: Address, U> Future for ConnectAsyncExt<T, U>
where
U: AsyncRead + AsyncWrite + fmt::Debug,
{
type Item = Connection<T, SslStream<U>>;
type Error = HandshakeError<U>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll().map_err(|e| {
trace!("SSL Handshake error: {:?}", e);
e
})? {
Async::Ready(stream) => {
let s = self.stream.take().unwrap();
trace!("SSL Handshake success: {:?}", s.host());
Ok(Async::Ready(s.replace(stream).1))
}
Async::NotReady => Ok(Async::NotReady),
}
}
}
pub struct OpensslConnectServiceFactory<T> {
tcp: ConnectServiceFactory<T>,
openssl: OpensslConnector<T, TcpStream>,
}
impl<T> OpensslConnectServiceFactory<T> {
/// Construct new OpensslConnectService factory
pub fn new(connector: SslConnector) -> Self {
OpensslConnectServiceFactory {
tcp: ConnectServiceFactory::default(),
openssl: OpensslConnector::new(connector),
}
}
/// Construct new connect service with custom dns resolver
pub fn with_resolver(connector: SslConnector, resolver: AsyncResolver) -> Self {
OpensslConnectServiceFactory {
tcp: ConnectServiceFactory::with_resolver(resolver),
openssl: OpensslConnector::new(connector),
}
}
/// Construct openssl connect service
pub fn service(&self) -> OpensslConnectService<T> {
OpensslConnectService {
tcp: self.tcp.service(),
openssl: OpensslConnectorService {
connector: self.openssl.connector.clone(),
_t: PhantomData,
},
}
}
}
impl<T> Clone for OpensslConnectServiceFactory<T> {
fn clone(&self) -> Self {
OpensslConnectServiceFactory {
tcp: self.tcp.clone(),
openssl: self.openssl.clone(),
}
}
}
impl<T: Address> NewService for OpensslConnectServiceFactory<T> {
type Request = Connect<T>;
type Response = SslStream<TcpStream>;
type Error = ConnectError;
type Config = ();
type Service = OpensslConnectService<T>;
type InitError = ();
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, _: &()) -> Self::Future {
ok(self.service())
}
}
#[derive(Clone)]
pub struct OpensslConnectService<T> {
tcp: ConnectService<T>,
openssl: OpensslConnectorService<T, TcpStream>,
}
impl<T: Address> Service for OpensslConnectService<T> {
type Request = Connect<T>;
type Response = SslStream<TcpStream>;
type Error = ConnectError;
type Future = OpensslConnectServiceResponse<T>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, req: Connect<T>) -> Self::Future {
OpensslConnectServiceResponse {
fut1: Some(self.tcp.call(req)),
fut2: None,
openssl: self.openssl.clone(),
}
}
}
pub struct OpensslConnectServiceResponse<T: Address> {
fut1: Option<<ConnectService<T> as Service>::Future>,
fut2: Option<<OpensslConnectorService<T, TcpStream> as Service>::Future>,
openssl: OpensslConnectorService<T, TcpStream>,
}
impl<T: Address> Future for OpensslConnectServiceResponse<T> {
type Item = SslStream<TcpStream>;
type Error = ConnectError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(ref mut fut) = self.fut1 {
let res = try_ready!(fut.poll());
let _ = self.fut1.take();
self.fut2 = Some(self.openssl.call(res));
}
if let Some(ref mut fut) = self.fut2 {
let connect = try_ready!(fut
.poll()
.map_err(|e| ConnectError::Io(io::Error::new(io::ErrorKind::Other, e))));
Ok(Async::Ready(connect.into_parts().0))
} else {
Ok(Async::NotReady)
}
}
}


@@ -1,133 +0,0 @@
use std::fmt;
use std::marker::PhantomData;
use actix_codec::{AsyncRead, AsyncWrite};
use actix_service::{NewService, Service};
use futures::{future::ok, future::FutureResult, Async, Future, Poll};
use std::sync::Arc;
use tokio_rustls::{
rustls::{ClientConfig, ClientSession},
Connect, TlsConnector, TlsStream,
};
use webpki::DNSNameRef;
use crate::{Address, Connection};
/// Rustls connector factory
pub struct RustlsConnector<T, U> {
connector: Arc<ClientConfig>,
_t: PhantomData<(T, U)>,
}
impl<T, U> RustlsConnector<T, U> {
pub fn new(connector: Arc<ClientConfig>) -> Self {
RustlsConnector {
connector,
_t: PhantomData,
}
}
}
impl<T, U> RustlsConnector<T, U>
where
T: Address,
U: AsyncRead + AsyncWrite + fmt::Debug,
{
pub fn service(
connector: Arc<ClientConfig>,
) -> impl Service<
Request = Connection<T, U>,
Response = Connection<T, TlsStream<U, ClientSession>>,
Error = std::io::Error,
> {
RustlsConnectorService {
connector: connector,
_t: PhantomData,
}
}
}
impl<T, U> Clone for RustlsConnector<T, U> {
fn clone(&self) -> Self {
Self {
connector: self.connector.clone(),
_t: PhantomData,
}
}
}
impl<T: Address, U> NewService for RustlsConnector<T, U>
where
U: AsyncRead + AsyncWrite + fmt::Debug,
{
type Request = Connection<T, U>;
type Response = Connection<T, TlsStream<U, ClientSession>>;
type Error = std::io::Error;
type Config = ();
type Service = RustlsConnectorService<T, U>;
type InitError = ();
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, _: &()) -> Self::Future {
ok(RustlsConnectorService {
connector: self.connector.clone(),
_t: PhantomData,
})
}
}
pub struct RustlsConnectorService<T, U> {
connector: Arc<ClientConfig>,
_t: PhantomData<(T, U)>,
}
impl<T: Address, U> Service for RustlsConnectorService<T, U>
where
U: AsyncRead + AsyncWrite + fmt::Debug,
{
type Request = Connection<T, U>;
type Response = Connection<T, TlsStream<U, ClientSession>>;
type Error = std::io::Error;
type Future = ConnectAsyncExt<T, U>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, stream: Connection<T, U>) -> Self::Future {
trace!("SSL Handshake start for: {:?}", stream.host());
let (io, stream) = stream.replace(());
let host = DNSNameRef::try_from_ascii_str(stream.host()).unwrap();
ConnectAsyncExt {
fut: TlsConnector::from(self.connector.clone()).connect(host, io),
stream: Some(stream),
}
}
}
pub struct ConnectAsyncExt<T, U> {
fut: Connect<U>,
stream: Option<Connection<T, ()>>,
}
impl<T: Address, U> Future for ConnectAsyncExt<T, U>
where
U: AsyncRead + AsyncWrite + fmt::Debug,
{
type Item = Connection<T, TlsStream<U, ClientSession>>;
type Error = std::io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.fut.poll().map_err(|e| {
trace!("SSL Handshake error: {:?}", e);
e
})? {
Async::Ready(stream) => {
let s = self.stream.take().unwrap();
trace!("SSL Handshake success: {:?}", s.host());
Ok(Async::Ready(s.replace(stream).1))
}
Async::NotReady => Ok(Async::NotReady),
}
}
}


@@ -1,142 +0,0 @@
use actix_codec::{BytesCodec, Framed};
use actix_server_config::Io;
use actix_service::{service_fn, NewService, Service};
use actix_test_server::TestServer;
use bytes::Bytes;
use futures::{future::lazy, Future, Sink};
use http::{HttpTryFrom, Uri};
use trust_dns_resolver::config::{ResolverConfig, ResolverOpts};
use actix_connect::{default_connector, Connect};
#[cfg(feature = "ssl")]
#[test]
fn test_string() {
let mut srv = TestServer::with(|| {
service_fn(|io: Io<tokio_tcp::TcpStream>| {
Framed::new(io.into_parts().0, BytesCodec)
.send(Bytes::from_static(b"test"))
.then(|_| Ok::<_, ()>(()))
})
});
let mut conn = default_connector();
let addr = format!("localhost:{}", srv.port());
let con = srv.run_on(move || conn.call(addr.into())).unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}
#[cfg(feature = "rust-tls")]
#[test]
fn test_rustls_string() {
let mut srv = TestServer::with(|| {
service_fn(|io: Io<tokio_tcp::TcpStream>| {
Framed::new(io.into_parts().0, BytesCodec)
.send(Bytes::from_static(b"test"))
.then(|_| Ok::<_, ()>(()))
})
});
let mut conn = default_connector();
let addr = format!("localhost:{}", srv.port());
let con = srv.run_on(move || conn.call(addr.into())).unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}
#[test]
fn test_static_str() {
let mut srv = TestServer::with(|| {
service_fn(|io: Io<tokio_tcp::TcpStream>| {
Framed::new(io.into_parts().0, BytesCodec)
.send(Bytes::from_static(b"test"))
.then(|_| Ok::<_, ()>(()))
})
});
let resolver = srv
.block_on(lazy(
|| Ok::<_, ()>(actix_connect::start_default_resolver()),
))
.unwrap();
let mut conn = srv
.block_on(lazy(|| {
Ok::<_, ()>(actix_connect::new_connector(resolver.clone()))
}))
.unwrap();
let con = srv
.block_on(conn.call(Connect::with("10", srv.addr())))
.unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
let connect = Connect::new(srv.host().to_owned());
let mut conn = srv
.block_on(lazy(|| Ok::<_, ()>(actix_connect::new_connector(resolver))))
.unwrap();
let con = srv.block_on(conn.call(connect));
assert!(con.is_err());
}
#[test]
fn test_new_service() {
let mut srv = TestServer::with(|| {
service_fn(|io: Io<tokio_tcp::TcpStream>| {
Framed::new(io.into_parts().0, BytesCodec)
.send(Bytes::from_static(b"test"))
.then(|_| Ok::<_, ()>(()))
})
});
let resolver = srv
.block_on(lazy(|| {
Ok::<_, ()>(actix_connect::start_resolver(
ResolverConfig::default(),
ResolverOpts::default(),
))
}))
.unwrap();
let factory = srv
.block_on(lazy(|| {
Ok::<_, ()>(actix_connect::new_connector_factory(resolver))
}))
.unwrap();
let mut conn = srv.block_on(factory.new_service(&())).unwrap();
let con = srv
.block_on(conn.call(Connect::with("10", srv.addr())))
.unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}
#[cfg(feature = "ssl")]
#[test]
fn test_uri() {
let mut srv = TestServer::with(|| {
service_fn(|io: Io<tokio_tcp::TcpStream>| {
Framed::new(io.into_parts().0, BytesCodec)
.send(Bytes::from_static(b"test"))
.then(|_| Ok::<_, ()>(()))
})
});
let mut conn = default_connector();
let addr = Uri::try_from(format!("https://localhost:{}", srv.port())).unwrap();
let con = srv.run_on(move || conn.call(addr.into())).unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}
#[cfg(feature = "rust-tls")]
#[test]
fn test_rustls_uri() {
let mut srv = TestServer::with(|| {
service_fn(|io: Io<tokio_tcp::TcpStream>| {
Framed::new(io.into_parts().0, BytesCodec)
.send(Bytes::from_static(b"test"))
.then(|_| Ok::<_, ()>(()))
})
});
let mut conn = default_connector();
let addr = Uri::try_from(format!("https://localhost:{}", srv.port())).unwrap();
let con = srv.run_on(move || conn.call(addr.into())).unwrap();
assert_eq!(con.peer_addr().unwrap(), srv.addr());
}


@@ -1,35 +0,0 @@
[package]
name = "actix-ioframe"
version = "0.1.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix framed service"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-ioframed/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
edition = "2018"
workspace = ".."
[lib]
name = "actix_ioframe"
path = "src/lib.rs"
[dependencies]
actix-service = "0.4.1"
actix-codec = "0.1.2"
bytes = "0.4"
either = "1.5.2"
futures = "0.1.25"
tokio-current-thread = "0.1.4"
log = "0.4"
[dev-dependencies]
actix-rt = "0.2.2"
actix-connect = "0.2.0"
actix-test-server = "0.2.2"
actix-server-config = "0.1.1"
tokio-tcp = "0.1"
tokio-timer = "0.2"


@@ -1,35 +0,0 @@
//! Custom cell impl
use std::cell::UnsafeCell;
use std::fmt;
use std::rc::Rc;
pub(crate) struct Cell<T> {
inner: Rc<UnsafeCell<T>>,
}
impl<T> Clone for Cell<T> {
fn clone(&self) -> Self {
Self {
inner: self.inner.clone(),
}
}
}
impl<T: fmt::Debug> fmt::Debug for Cell<T> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
self.inner.fmt(f)
}
}
impl<T> Cell<T> {
pub fn new(inner: T) -> Self {
Self {
inner: Rc::new(UnsafeCell::new(inner)),
}
}
pub(crate) unsafe fn get_mut(&mut self) -> &mut T {
&mut *self.inner.as_ref().get()
}
}


@@ -1,110 +0,0 @@
use std::marker::PhantomData;
use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed};
use futures::unsync::mpsc;
use crate::dispatcher::FramedMessage;
use crate::sink::Sink;
pub struct Connect<Io, St = (), Codec = ()> {
io: Io,
_t: PhantomData<(St, Codec)>,
}
impl<Io> Connect<Io>
where
Io: AsyncRead + AsyncWrite,
{
pub(crate) fn new(io: Io) -> Self {
Self {
io,
_t: PhantomData,
}
}
pub fn codec<Codec>(self, codec: Codec) -> ConnectResult<Io, (), Codec>
where
Codec: Encoder + Decoder,
{
let (tx, rx) = mpsc::unbounded();
let sink = Sink::new(tx);
ConnectResult {
state: (),
framed: Framed::new(self.io, codec),
rx,
sink,
}
}
}
pub struct ConnectResult<Io, St, Codec: Encoder + Decoder> {
pub(crate) state: St,
pub(crate) framed: Framed<Io, Codec>,
pub(crate) rx: mpsc::UnboundedReceiver<FramedMessage<<Codec as Encoder>::Item>>,
pub(crate) sink: Sink<<Codec as Encoder>::Item>,
}
impl<Io, St, Codec: Encoder + Decoder> ConnectResult<Io, St, Codec> {
#[inline]
pub fn sink(&self) -> &Sink<<Codec as Encoder>::Item> {
&self.sink
}
#[inline]
pub fn get_ref(&self) -> &Io {
self.framed.get_ref()
}
#[inline]
pub fn get_mut(&mut self) -> &mut Io {
self.framed.get_mut()
}
#[inline]
pub fn state<S>(self, state: S) -> ConnectResult<Io, S, Codec> {
ConnectResult {
state,
framed: self.framed,
rx: self.rx,
sink: self.sink,
}
}
}
impl<Io, St, Codec> futures::Stream for ConnectResult<Io, St, Codec>
where
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
{
type Item = <Codec as Decoder>::Item;
type Error = <Codec as Decoder>::Error;
fn poll(&mut self) -> futures::Poll<Option<Self::Item>, Self::Error> {
self.framed.poll()
}
}
impl<Io, St, Codec> futures::Sink for ConnectResult<Io, St, Codec>
where
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
{
type SinkItem = <Codec as Encoder>::Item;
type SinkError = <Codec as Encoder>::Error;
fn start_send(
&mut self,
item: Self::SinkItem,
) -> futures::StartSend<Self::SinkItem, Self::SinkError> {
self.framed.start_send(item)
}
fn poll_complete(&mut self) -> futures::Poll<(), Self::SinkError> {
self.framed.poll_complete()
}
fn close(&mut self) -> futures::Poll<(), Self::SinkError> {
self.framed.close()
}
}


@@ -1,325 +0,0 @@
//! Framed dispatcher service and related utilities
use std::collections::VecDeque;
use std::mem;
use std::rc::Rc;
use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder, Framed};
use actix_service::{IntoService, Service};
use futures::task::AtomicTask;
use futures::unsync::{mpsc, oneshot};
use futures::{Async, Future, Poll, Sink as FutureSink, Stream};
use log::debug;
use crate::cell::Cell;
use crate::error::ServiceError;
use crate::item::Item;
use crate::sink::Sink;
use crate::state::State;
type Request<S, U> = Item<S, U>;
type Response<U> = <U as Encoder>::Item;
pub(crate) enum FramedMessage<T> {
Message(T),
Close,
WaitClose(oneshot::Sender<()>),
}
/// FramedDispatcher - a future that reads frames from the Framed object
/// and passes them to the service.
pub(crate) struct FramedDispatcher<St, S, T, U>
where
S: Service<Request = Request<St, U>, Response = Option<Response<U>>>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Encoder + Decoder,
<U as Encoder>::Item: 'static,
<U as Encoder>::Error: std::fmt::Debug,
{
service: S,
sink: Sink<<U as Encoder>::Item>,
state: State<St>,
dispatch_state: FramedState<S, U>,
framed: Framed<T, U>,
rx: Option<mpsc::UnboundedReceiver<FramedMessage<<U as Encoder>::Item>>>,
inner: Cell<FramedDispatcherInner<<U as Encoder>::Item, S::Error>>,
disconnect: Option<Rc<dyn Fn(&mut St, bool)>>,
}
impl<St, S, T, U> FramedDispatcher<St, S, T, U>
where
S: Service<Request = Request<St, U>, Response = Option<Response<U>>>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder,
<U as Encoder>::Item: 'static,
<U as Encoder>::Error: std::fmt::Debug,
{
pub(crate) fn new<F: IntoService<S>>(
framed: Framed<T, U>,
state: State<St>,
service: F,
rx: mpsc::UnboundedReceiver<FramedMessage<<U as Encoder>::Item>>,
sink: Sink<<U as Encoder>::Item>,
disconnect: Option<Rc<dyn Fn(&mut St, bool)>>,
) -> Self {
FramedDispatcher {
framed,
state,
sink,
disconnect,
rx: Some(rx),
service: service.into_service(),
dispatch_state: FramedState::Processing,
inner: Cell::new(FramedDispatcherInner {
buf: VecDeque::new(),
task: AtomicTask::new(),
}),
}
}
}
enum FramedState<S: Service, U: Encoder + Decoder> {
Processing,
Error(ServiceError<S::Error, U>),
FramedError(ServiceError<S::Error, U>),
FlushAndStop(Vec<oneshot::Sender<()>>),
Stopping,
}
impl<S: Service, U: Encoder + Decoder> FramedState<S, U> {
fn stop(&mut self, tx: Option<oneshot::Sender<()>>) {
match self {
FramedState::FlushAndStop(ref mut vec) => {
if let Some(tx) = tx {
vec.push(tx)
}
}
FramedState::Processing => {
*self = FramedState::FlushAndStop(if let Some(tx) = tx {
vec![tx]
} else {
Vec::new()
})
}
FramedState::Error(_) | FramedState::FramedError(_) | FramedState::Stopping => {
if let Some(tx) = tx {
let _ = tx.send(());
}
}
}
}
}
struct FramedDispatcherInner<I, E> {
buf: VecDeque<Result<I, E>>,
task: AtomicTask,
}
impl<St, S, T, U> FramedDispatcher<St, S, T, U>
where
S: Service<Request = Request<St, U>, Response = Option<Response<U>>>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder,
<U as Encoder>::Item: 'static,
<U as Encoder>::Error: std::fmt::Debug,
{
fn disconnect(&mut self, error: bool) {
if let Some(ref disconnect) = self.disconnect {
(&*disconnect)(&mut *self.state.get_mut(), error);
}
}
fn poll_read(&mut self) -> bool {
loop {
match self.service.poll_ready() {
Ok(Async::Ready(_)) => {
let item = match self.framed.poll() {
Ok(Async::Ready(Some(el))) => el,
Err(err) => {
self.dispatch_state =
FramedState::FramedError(ServiceError::Decoder(err));
return true;
}
Ok(Async::NotReady) => return false,
Ok(Async::Ready(None)) => {
self.dispatch_state = FramedState::Stopping;
return true;
}
};
let mut cell = self.inner.clone();
unsafe { cell.get_mut().task.register() };
tokio_current_thread::spawn(
self.service
.call(Item::new(self.state.clone(), self.sink.clone(), item))
.then(move |item| {
let item = match item {
Ok(Some(item)) => Ok(item),
Ok(None) => return Ok(()),
Err(err) => Err(err),
};
unsafe {
let inner = cell.get_mut();
inner.buf.push_back(item);
inner.task.notify();
}
Ok(())
}),
);
}
Ok(Async::NotReady) => return false,
Err(err) => {
self.dispatch_state = FramedState::Error(ServiceError::Service(err));
return true;
}
}
}
}
/// write to framed object
fn poll_write(&mut self) -> bool {
let inner = unsafe { self.inner.get_mut() };
let mut rx_done = self.rx.is_none();
let mut buf_empty = inner.buf.is_empty();
loop {
while !self.framed.is_write_buf_full() {
if !buf_empty {
match inner.buf.pop_front().unwrap() {
Ok(msg) => {
if let Err(err) = self.framed.force_send(msg) {
self.dispatch_state =
FramedState::FramedError(ServiceError::Encoder(err));
return true;
}
buf_empty = inner.buf.is_empty();
}
Err(err) => {
self.dispatch_state =
FramedState::Error(ServiceError::Service(err));
return true;
}
}
}
if !rx_done && self.rx.is_some() {
match self.rx.as_mut().unwrap().poll() {
Ok(Async::Ready(Some(FramedMessage::Message(msg)))) => {
if let Err(err) = self.framed.force_send(msg) {
self.dispatch_state =
FramedState::FramedError(ServiceError::Encoder(err));
return true;
}
}
Ok(Async::Ready(Some(FramedMessage::Close))) => {
self.dispatch_state.stop(None);
return true;
}
Ok(Async::Ready(Some(FramedMessage::WaitClose(tx)))) => {
self.dispatch_state.stop(Some(tx));
return true;
}
Ok(Async::Ready(None)) => {
rx_done = true;
let _ = self.rx.take();
}
Ok(Async::NotReady) => rx_done = true,
Err(_e) => {
rx_done = true;
let _ = self.rx.take();
}
}
}
if rx_done && buf_empty {
break;
}
}
if !self.framed.is_write_buf_empty() {
match self.framed.poll_complete() {
Ok(Async::NotReady) => break,
Err(err) => {
debug!("Error sending data: {:?}", err);
self.dispatch_state =
FramedState::FramedError(ServiceError::Encoder(err));
return true;
}
Ok(Async::Ready(_)) => (),
}
} else {
break;
}
}
false
}
}
impl<St, S, T, U> Future for FramedDispatcher<St, S, T, U>
where
S: Service<Request = Request<St, U>, Response = Option<Response<U>>>,
S::Error: 'static,
S::Future: 'static,
T: AsyncRead + AsyncWrite,
U: Decoder + Encoder,
<U as Encoder>::Item: 'static,
<U as Encoder>::Error: std::fmt::Debug,
{
type Item = ();
type Error = ServiceError<S::Error, U>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match mem::replace(&mut self.dispatch_state, FramedState::Processing) {
FramedState::Processing => {
if self.poll_read() || self.poll_write() {
self.poll()
} else {
Ok(Async::NotReady)
}
}
FramedState::Error(err) => {
if self.framed.is_write_buf_empty()
|| (self.poll_write() || self.framed.is_write_buf_empty())
{
self.disconnect(true);
Err(err)
} else {
self.dispatch_state = FramedState::Error(err);
Ok(Async::NotReady)
}
}
FramedState::FlushAndStop(mut vec) => {
if !self.framed.is_write_buf_empty() {
match self.framed.poll_complete() {
Err(err) => {
debug!("Error sending data: {:?}", err);
}
Ok(Async::NotReady) => {
self.dispatch_state = FramedState::FlushAndStop(vec);
return Ok(Async::NotReady);
}
Ok(Async::Ready(_)) => (),
}
};
for tx in vec.drain(..) {
let _ = tx.send(());
}
self.disconnect(false);
Ok(Async::Ready(()))
}
FramedState::FramedError(err) => {
self.disconnect(true);
Err(err)
}
FramedState::Stopping => {
self.disconnect(false);
Ok(Async::Ready(()))
}
}
}
}


@@ -1,49 +0,0 @@
use std::fmt;
use actix_codec::{Decoder, Encoder};
/// Framed service errors
pub enum ServiceError<E, U: Encoder + Decoder> {
/// Inner service error
Service(E),
/// Encoder parse error
Encoder(<U as Encoder>::Error),
/// Decoder parse error
Decoder(<U as Decoder>::Error),
}
impl<E, U: Encoder + Decoder> From<E> for ServiceError<E, U> {
fn from(err: E) -> Self {
ServiceError::Service(err)
}
}
impl<E, U: Encoder + Decoder> fmt::Debug for ServiceError<E, U>
where
E: fmt::Debug,
<U as Encoder>::Error: fmt::Debug,
<U as Decoder>::Error: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
ServiceError::Service(ref e) => write!(fmt, "ServiceError::Service({:?})", e),
ServiceError::Encoder(ref e) => write!(fmt, "ServiceError::Encoder({:?})", e),
ServiceError::Decoder(ref e) => write!(fmt, "ServiceError::Decoder({:?})", e),
}
}
}
impl<E, U: Encoder + Decoder> fmt::Display for ServiceError<E, U>
where
E: fmt::Display,
<U as Encoder>::Error: fmt::Debug,
<U as Decoder>::Error: fmt::Debug,
{
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
match *self {
ServiceError::Service(ref e) => write!(fmt, "{}", e),
ServiceError::Encoder(ref e) => write!(fmt, "{:?}", e),
ServiceError::Decoder(ref e) => write!(fmt, "{:?}", e),
}
}
}


@@ -1,90 +0,0 @@
use std::cell::{Ref, RefMut};
use std::fmt;
use std::ops::{Deref, DerefMut};
use actix_codec::{Decoder, Encoder};
use crate::sink::Sink;
use crate::state::State;
pub struct Item<St, Codec: Encoder + Decoder> {
state: State<St>,
sink: Sink<<Codec as Encoder>::Item>,
item: <Codec as Decoder>::Item,
}
impl<St, Codec> Item<St, Codec>
where
Codec: Encoder + Decoder,
{
pub(crate) fn new(
state: State<St>,
sink: Sink<<Codec as Encoder>::Item>,
item: <Codec as Decoder>::Item,
) -> Self {
Item { state, sink, item }
}
#[inline]
pub fn state(&self) -> Ref<St> {
self.state.get_ref()
}
#[inline]
pub fn state_mut(&mut self) -> RefMut<St> {
self.state.get_mut()
}
#[inline]
pub fn sink(&self) -> &Sink<<Codec as Encoder>::Item> {
&self.sink
}
#[inline]
pub fn into_inner(self) -> <Codec as Decoder>::Item {
self.item
}
#[inline]
pub fn into_parts(
self,
) -> (
State<St>,
Sink<<Codec as Encoder>::Item>,
<Codec as Decoder>::Item,
) {
(self.state, self.sink, self.item)
}
}
impl<St, Codec> Deref for Item<St, Codec>
where
Codec: Encoder + Decoder,
{
type Target = <Codec as Decoder>::Item;
#[inline]
fn deref(&self) -> &<Codec as Decoder>::Item {
&self.item
}
}
impl<St, Codec> DerefMut for Item<St, Codec>
where
Codec: Encoder + Decoder,
{
#[inline]
fn deref_mut(&mut self) -> &mut <Codec as Decoder>::Item {
&mut self.item
}
}
impl<St, Codec> fmt::Debug for Item<St, Codec>
where
Codec: Encoder + Decoder,
<Codec as Decoder>::Item: fmt::Debug,
{
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
f.debug_tuple("FramedItem").field(&self.item).finish()
}
}


@@ -1,15 +0,0 @@
mod cell;
mod connect;
mod dispatcher;
mod error;
mod item;
mod service;
mod sink;
mod state;
pub use self::connect::{Connect, ConnectResult};
pub use self::error::ServiceError;
pub use self::item::Item;
pub use self::service::{Builder, NewServiceBuilder, ServiceBuilder};
pub use self::sink::Sink;
pub use self::state::State;


@@ -1,363 +0,0 @@
use std::marker::PhantomData;
use std::rc::Rc;
use actix_codec::{AsyncRead, AsyncWrite, Decoder, Encoder};
use actix_service::{IntoNewService, IntoService, NewService, Service};
use futures::{Async, Future, Poll};
use crate::connect::{Connect, ConnectResult};
use crate::dispatcher::FramedDispatcher;
use crate::error::ServiceError;
use crate::item::Item;
use crate::state::State;
type RequestItem<S, U> = Item<S, U>;
type ResponseItem<U> = Option<<U as Encoder>::Item>;
/// Service builder - structure that follows the builder pattern
/// for building instances for framed services.
pub struct Builder<St, Codec>(PhantomData<(St, Codec)>);
impl<St, Codec> Builder<St, Codec> {
pub fn new() -> Builder<St, Codec> {
Builder(PhantomData)
}
/// Construct framed handler service with specified connect service
pub fn service<Io, C, F>(self, connect: F) -> ServiceBuilder<St, C, Io, Codec>
where
F: IntoService<C>,
Io: AsyncRead + AsyncWrite,
C: Service<Request = Connect<Io>, Response = ConnectResult<Io, St, Codec>>,
Codec: Decoder + Encoder,
{
ServiceBuilder {
connect: connect.into_service(),
disconnect: None,
_t: PhantomData,
}
}
/// Construct framed handler new service with specified connect service
pub fn factory<Io, C, F>(self, connect: F) -> NewServiceBuilder<St, C, Io, Codec>
where
F: IntoNewService<C>,
Io: AsyncRead + AsyncWrite,
C: NewService<
Config = (),
Request = Connect<Io>,
Response = ConnectResult<Io, St, Codec>,
>,
C::Error: 'static,
C::Future: 'static,
Codec: Decoder + Encoder,
{
NewServiceBuilder {
connect: connect.into_new_service(),
disconnect: None,
_t: PhantomData,
}
}
}
pub struct ServiceBuilder<St, C, Io, Codec> {
connect: C,
disconnect: Option<Rc<dyn Fn(&mut St, bool)>>,
_t: PhantomData<(St, Io, Codec)>,
}
impl<St, C, Io, Codec> ServiceBuilder<St, C, Io, Codec>
where
St: 'static,
Io: AsyncRead + AsyncWrite,
C: Service<Request = Connect<Io>, Response = ConnectResult<Io, St, Codec>>,
C::Error: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
{
/// Callback to execute on disconnect
///
/// Second parameter indicates whether an error occurred during disconnect.
pub fn disconnect<F, Out>(mut self, disconnect: F) -> Self
where
F: Fn(&mut St, bool) + 'static,
{
self.disconnect = Some(Rc::new(disconnect));
self
}
/// Provide stream items handler service and construct service factory.
pub fn finish<F, T>(
self,
service: F,
) -> impl Service<Request = Io, Response = (), Error = ServiceError<C::Error, Codec>>
where
F: IntoNewService<T>,
T: NewService<
Config = St,
Request = RequestItem<St, Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
> + 'static,
{
FramedServiceImpl {
connect: self.connect,
handler: Rc::new(service.into_new_service()),
disconnect: self.disconnect.clone(),
_t: PhantomData,
}
}
}
pub struct NewServiceBuilder<St, C, Io, Codec> {
connect: C,
disconnect: Option<Rc<dyn Fn(&mut St, bool)>>,
_t: PhantomData<(St, Io, Codec)>,
}
impl<St, C, Io, Codec> NewServiceBuilder<St, C, Io, Codec>
where
St: 'static,
Io: AsyncRead + AsyncWrite,
C: NewService<Config = (), Request = Connect<Io>, Response = ConnectResult<Io, St, Codec>>,
C::Error: 'static,
C::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
{
/// Callback to execute on disconnect
///
/// Second parameter indicates whether an error occurred during disconnect.
pub fn disconnect<F>(mut self, disconnect: F) -> Self
where
F: Fn(&mut St, bool) + 'static,
{
self.disconnect = Some(Rc::new(disconnect));
self
}
pub fn finish<F, T, Cfg>(
self,
service: F,
) -> impl NewService<
Config = Cfg,
Request = Io,
Response = (),
Error = ServiceError<C::Error, Codec>,
>
where
F: IntoNewService<T>,
T: NewService<
Config = St,
Request = RequestItem<St, Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
> + 'static,
{
FramedService {
connect: self.connect,
handler: Rc::new(service.into_new_service()),
disconnect: self.disconnect,
_t: PhantomData,
}
}
}
pub(crate) struct FramedService<St, C, T, Io, Codec, Cfg> {
connect: C,
handler: Rc<T>,
disconnect: Option<Rc<dyn Fn(&mut St, bool)>>,
_t: PhantomData<(St, Io, Codec, Cfg)>,
}
impl<St, C, T, Io, Codec, Cfg> NewService for FramedService<St, C, T, Io, Codec, Cfg>
where
St: 'static,
Io: AsyncRead + AsyncWrite,
C: NewService<Config = (), Request = Connect<Io>, Response = ConnectResult<Io, St, Codec>>,
C::Error: 'static,
C::Future: 'static,
T: NewService<
Config = St,
Request = RequestItem<St, Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
> + 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
{
type Config = Cfg;
type Request = Io;
type Response = ();
type Error = ServiceError<C::Error, Codec>;
type InitError = C::InitError;
type Service = FramedServiceImpl<St, C::Service, T, Io, Codec>;
type Future = Box<dyn Future<Item = Self::Service, Error = Self::InitError>>;
fn new_service(&self, _: &Cfg) -> Self::Future {
let handler = self.handler.clone();
let disconnect = self.disconnect.clone();
// create connect service and then create service impl
Box::new(
self.connect
.new_service(&())
.map(move |connect| FramedServiceImpl {
connect,
handler,
disconnect,
_t: PhantomData,
}),
)
}
}
pub struct FramedServiceImpl<St, C, T, Io, Codec> {
connect: C,
handler: Rc<T>,
disconnect: Option<Rc<dyn Fn(&mut St, bool)>>,
_t: PhantomData<(St, Io, Codec)>,
}
impl<St, C, T, Io, Codec> Service for FramedServiceImpl<St, C, T, Io, Codec>
where
Io: AsyncRead + AsyncWrite,
C: Service<Request = Connect<Io>, Response = ConnectResult<Io, St, Codec>>,
C::Error: 'static,
T: NewService<
Config = St,
Request = RequestItem<St, Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<<T as NewService>::Service as Service>::Future: 'static,
Codec: Decoder + Encoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
{
type Request = Io;
type Response = ();
type Error = ServiceError<C::Error, Codec>;
type Future = FramedServiceImplResponse<St, Io, Codec, C, T>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.connect.poll_ready().map_err(|e| e.into())
}
fn call(&mut self, req: Io) -> Self::Future {
FramedServiceImplResponse {
inner: FramedServiceImplResponseInner::Connect(
self.connect.call(Connect::new(req)),
self.handler.clone(),
),
disconnect: self.disconnect.clone(),
}
}
}
pub struct FramedServiceImplResponse<St, Io, Codec, C, T>
where
C: Service<Request = Connect<Io>, Response = ConnectResult<Io, St, Codec>>,
C::Error: 'static,
T: NewService<
Config = St,
Request = RequestItem<St, Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<<T as NewService>::Service as Service>::Future: 'static,
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
{
inner: FramedServiceImplResponseInner<St, Io, Codec, C, T>,
disconnect: Option<Rc<dyn Fn(&mut St, bool)>>,
}
enum FramedServiceImplResponseInner<St, Io, Codec, C, T>
where
C: Service<Request = Connect<Io>, Response = ConnectResult<Io, St, Codec>>,
C::Error: 'static,
T: NewService<
Config = St,
Request = RequestItem<St, Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<<T as NewService>::Service as Service>::Future: 'static,
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
{
Connect(C::Future, Rc<T>),
Handler(T::Future, Option<ConnectResult<Io, St, Codec>>),
Dispatcher(FramedDispatcher<St, T::Service, Io, Codec>),
}
impl<St, Io, Codec, C, T> Future for FramedServiceImplResponse<St, Io, Codec, C, T>
where
C: Service<Request = Connect<Io>, Response = ConnectResult<Io, St, Codec>>,
C::Error: 'static,
T: NewService<
Config = St,
Request = RequestItem<St, Codec>,
Response = ResponseItem<Codec>,
Error = C::Error,
InitError = C::Error,
>,
<<T as NewService>::Service as Service>::Future: 'static,
Io: AsyncRead + AsyncWrite,
Codec: Encoder + Decoder,
<Codec as Encoder>::Item: 'static,
<Codec as Encoder>::Error: std::fmt::Debug,
{
type Item = ();
type Error = ServiceError<C::Error, Codec>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.inner {
FramedServiceImplResponseInner::Connect(ref mut fut, ref handler) => {
match fut.poll()? {
Async::Ready(res) => {
self.inner = FramedServiceImplResponseInner::Handler(
handler.new_service(&res.state),
Some(res),
);
self.poll()
}
Async::NotReady => Ok(Async::NotReady),
}
}
FramedServiceImplResponseInner::Handler(ref mut fut, ref mut res) => {
match fut.poll()? {
Async::Ready(handler) => {
let res = res.take().unwrap();
self.inner =
FramedServiceImplResponseInner::Dispatcher(FramedDispatcher::new(
res.framed,
State::new(res.state),
handler,
res.rx,
res.sink,
self.disconnect.clone(),
));
self.poll()
}
Async::NotReady => Ok(Async::NotReady),
}
}
FramedServiceImplResponseInner::Dispatcher(ref mut fut) => fut.poll(),
}
}
}


@@ -1,44 +0,0 @@
use std::fmt;
use futures::unsync::{mpsc, oneshot};
use futures::Future;
use crate::dispatcher::FramedMessage;
pub struct Sink<T>(mpsc::UnboundedSender<FramedMessage<T>>);
impl<T> Clone for Sink<T> {
fn clone(&self) -> Self {
Sink(self.0.clone())
}
}
impl<T> Sink<T> {
pub(crate) fn new(tx: mpsc::UnboundedSender<FramedMessage<T>>) -> Self {
Sink(tx)
}
/// Close connection
pub fn close(&self) {
let _ = self.0.unbounded_send(FramedMessage::Close);
}
/// Close connection
pub fn wait_close(&self) -> impl Future<Item = (), Error = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.unbounded_send(FramedMessage::WaitClose(tx));
rx.map_err(|_| ())
}
/// Send item
pub fn send(&self, item: T) {
let _ = self.0.unbounded_send(FramedMessage::Message(item));
}
}
impl<T> fmt::Debug for Sink<T> {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
fmt.debug_struct("Sink").finish()
}
}


@@ -1,30 +0,0 @@
use std::cell::{Ref, RefCell, RefMut};
use std::rc::Rc;
/// Connection state
///
/// Connection state is arbitrary data attached to each incoming message.
#[derive(Debug)]
pub struct State<T>(Rc<RefCell<T>>);
impl<T> State<T> {
pub(crate) fn new(st: T) -> Self {
State(Rc::new(RefCell::new(st)))
}
#[inline]
pub fn get_ref(&self) -> Ref<T> {
self.0.borrow()
}
#[inline]
pub fn get_mut(&mut self) -> RefMut<T> {
self.0.borrow_mut()
}
}
impl<T> Clone for State<T> {
fn clone(&self) -> Self {
State(self.0.clone())
}
}


@@ -1,60 +0,0 @@
use std::sync::atomic::{AtomicBool, Ordering};
use std::sync::Arc;
use std::time::Duration;
use actix_codec::BytesCodec;
use actix_server_config::Io;
use actix_service::{new_apply_fn, Service};
use actix_test_server::TestServer;
use futures::Future;
use tokio_tcp::TcpStream;
use tokio_timer::sleep;
use actix_ioframe::{Builder, Connect};
struct State;
#[test]
fn test_disconnect() -> std::io::Result<()> {
let disconnect = Arc::new(AtomicBool::new(false));
let disconnect1 = disconnect.clone();
let mut srv = TestServer::with(move || {
let disconnect1 = disconnect1.clone();
new_apply_fn(
Builder::new()
.factory(|conn: Connect<_>| Ok(conn.codec(BytesCodec).state(State)))
.disconnect(move |_, _| {
disconnect1.store(true, Ordering::Relaxed);
})
.finish(|_t| Ok(None)),
|io: Io<TcpStream>, srv| srv.call(io.into_parts().0),
)
});
let mut client = Builder::new()
.service(|conn: Connect<_>| {
let conn = conn.codec(BytesCodec).state(State);
conn.sink().close();
Ok(conn)
})
.finish(|_t| Ok(None));
let conn = srv
.block_on(
actix_connect::default_connector()
.call(actix_connect::Connect::with(String::new(), srv.addr())),
)
.unwrap();
srv.block_on(client.call(conn.into_parts().0)).unwrap();
let _ = srv.block_on(
sleep(Duration::from_millis(100))
.map(|_| ())
.map_err(|_| ()),
);
assert!(disconnect.load(Ordering::Relaxed));
Ok(())
}

actix-macros/.gitignore

@@ -0,0 +1 @@
/wip

actix-macros/CHANGES.md

@@ -0,0 +1,13 @@
# CHANGES
## 0.1.3 - 2020-12-3
* Add `actix-reexport` feature
## 0.1.2 - 2020-05-18
### Changed
* Forward actix_rt::test arguments to test function [#127]
[#127]: https://github.com/actix/actix-net/pull/127

actix-macros/Cargo.toml

@@ -0,0 +1,26 @@
[package]
name = "actix-macros"
version = "0.1.3"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix runtime macros"
repository = "https://github.com/actix/actix-net"
documentation = "https://docs.rs/actix-macros/"
categories = ["network-programming", "asynchronous"]
license = "MIT OR Apache-2.0"
edition = "2018"
[lib]
proc-macro = true
[dependencies]
quote = "1.0.3"
syn = { version = "^1", features = ["full"] }
[features]
actix-reexport = []
[dev-dependencies]
actix-rt = "1.0"
futures-util = { version = "0.3", default-features = false }
trybuild = "1"

actix-macros/src/lib.rs

@@ -0,0 +1,116 @@
//! Macros for use with Tokio
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
use proc_macro::TokenStream;
use quote::quote;
/// Marks async function to be executed by actix system.
///
/// ## Usage
///
/// ```
/// #[actix_rt::main]
/// async fn main() {
/// println!("Hello world");
/// }
/// ```
#[allow(clippy::needless_doctest_main)]
#[proc_macro_attribute]
#[cfg(not(test))] // Work around for rust-lang/rust#62127
pub fn main(_: TokenStream, item: TokenStream) -> TokenStream {
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
let body = &input.block;
let name = &sig.ident;
if sig.asyncness.is_none() {
return syn::Error::new_spanned(sig.fn_token, "only async fn is supported")
.to_compile_error()
.into();
}
sig.asyncness = None;
if cfg!(feature = "actix-reexport") {
(quote! {
#(#attrs)*
#vis #sig {
actix::System::new(stringify!(#name))
.block_on(async move { #body })
}
})
.into()
} else {
(quote! {
#(#attrs)*
#vis #sig {
actix_rt::System::new(stringify!(#name))
.block_on(async move { #body })
}
})
.into()
}
}
/// Marks async test function to be executed by actix runtime.
///
/// ## Usage
///
/// ```no_run
/// #[actix_rt::test]
/// async fn my_test() {
/// assert!(true);
/// }
/// ```
#[proc_macro_attribute]
pub fn test(_: TokenStream, item: TokenStream) -> TokenStream {
let mut input = syn::parse_macro_input!(item as syn::ItemFn);
let attrs = &input.attrs;
let vis = &input.vis;
let sig = &mut input.sig;
let body = &input.block;
let mut has_test_attr = false;
for attr in attrs {
if attr.path.is_ident("test") {
has_test_attr = true;
}
}
if sig.asyncness.is_none() {
return syn::Error::new_spanned(
input.sig.fn_token,
format!("only async fn is supported, {}", input.sig.ident),
)
.to_compile_error()
.into();
}
sig.asyncness = None;
let result = if has_test_attr {
quote! {
#(#attrs)*
#vis #sig {
actix_rt::System::new("test")
.block_on(async { #body })
}
}
} else {
quote! {
#[test]
#(#attrs)*
#vis #sig {
actix_rt::System::new("test")
.block_on(async { #body })
}
}
};
result.into()
}
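For orientation, a hand-expanded sketch of what `#[actix_rt::main]` generates for the basic example above when the `actix-reexport` feature is off (illustrative only; the real expansion substitutes the annotated function's own name, attributes, and visibility):

// Hand-expanded equivalent of the quote! block above (sketch, not generated output).
fn main() {
    actix_rt::System::new("main").block_on(async move {
        println!("Hello world");
    })
}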


@@ -0,0 +1,9 @@
#[test]
fn compile_macros() {
let t = trybuild::TestCases::new();
t.pass("tests/trybuild/main-01-basic.rs");
t.compile_fail("tests/trybuild/main-02-only-async.rs");
t.pass("tests/trybuild/test-01-basic.rs");
t.pass("tests/trybuild/test-02-keep-attrs.rs");
}


@@ -0,0 +1,4 @@
#[actix_rt::main]
async fn main() {
println!("Hello world");
}


@@ -0,0 +1,4 @@
#[actix_rt::main]
fn main() {
futures_util::future::ready(()).await
}


@@ -0,0 +1,14 @@
error: only async fn is supported
--> $DIR/main-02-only-async.rs:2:1
|
2 | fn main() {
| ^^
error[E0601]: `main` function not found in crate `$CRATE`
--> $DIR/main-02-only-async.rs:1:1
|
1 | / #[actix_rt::main]
2 | | fn main() {
3 | | futures_util::future::ready(()).await
4 | | }
| |_^ consider adding a `main` function to `$DIR/tests/trybuild/main-02-only-async.rs`


@@ -0,0 +1,6 @@
#[actix_rt::test]
async fn my_test() {
assert!(true);
}
fn main() {}


@@ -0,0 +1,7 @@
#[actix_rt::test]
#[should_panic]
async fn my_test() {
todo!()
}
fn main() {}

actix-router/CHANGES.md

@@ -0,0 +1,61 @@
# Changes
## Unreleased - 2021-xx-xx
## 0.2.6 - 2021-01-09
* Use `bytestring` version range compatible with Bytes v1.0. [#246]
[#246]: https://github.com/actix/actix-net/pull/246
## 0.2.5 - 2020-09-20
* Fix `from_hex()` method
## 0.2.4 - 2019-12-31
* Add `ResourceDef::resource_path_named()` path generation method
## 0.2.3 - 2019-12-25
* Add impl `IntoPattern` for `&String`
## 0.2.2 - 2019-12-25
* Use `IntoPattern` for `RouterBuilder::path()`
## 0.2.1 - 2019-12-25
* Add `IntoPattern` trait
* Add multi-pattern resources
## 0.2.0 - 2019-12-07
* Update http to 0.2
* Update regex to 1.3
* Use bytestring instead of string
## 0.1.5 - 2019-05-15
* Remove debug prints
## 0.1.4 - 2019-05-15
* Fix checked resource match
## 0.1.3 - 2019-04-22
* Added support for `remainder match` (i.e. "/path/{tail}*")
## 0.1.2 - 2019-04-07
* Export `Quoter` type
* Allow to reset `Path` instance
## 0.1.1 - 2019-04-03
* Get dynamic segment by name instead of iterator.
## 0.1.0 - 2019-03-09
* Initial release


@@ -1,16 +1,14 @@
[package]
name = "actix-router"
version = "0.1.5"
version = "0.2.6"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Path router"
description = "Resource path matching library"
keywords = ["actix"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-router/"
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
license = "MIT OR Apache-2.0"
edition = "2018"
workspace = ".."
[lib]
name = "actix_router"
@@ -20,13 +18,12 @@ path = "src/lib.rs"
default = ["http"]
[dependencies]
bytes = "0.4"
regex = "1.0"
serde = "1.0.80"
string = "0.2.0"
log = "0.4"
http = { version="0.1.14", optional=true }
regex = "1.3.1"
serde = "1.0.104"
bytestring = ">=0.1.5, <2"
log = "0.4.8"
http = { version = "0.2.2", optional = true }
[dev-dependencies]
http = "0.1.14"
http = "0.2.2"
serde_derive = "1.0"


@@ -7,9 +7,13 @@ use crate::ResourcePath;
macro_rules! unsupported_type {
($trait_fn:ident, $name:expr) => {
fn $trait_fn<V>(self, _: V) -> Result<V::Value, Self::Error>
where V: Visitor<'de>
where
V: Visitor<'de>,
{
Err(de::value::Error::custom(concat!("unsupported type: ", $name)))
Err(de::value::Error::custom(concat!(
"unsupported type: ",
$name
)))
}
};
}
@@ -17,23 +21,28 @@ macro_rules! unsupported_type {
macro_rules! parse_single_value {
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: Visitor<'de>
where
V: Visitor<'de>,
{
if self.path.len() != 1 {
Err(de::value::Error::custom(
format!("wrong number of parameters: {} expected 1",
self.path.len()).as_str()))
format!("wrong number of parameters: {} expected 1", self.path.len())
.as_str(),
))
} else {
let v = self.path[0].parse().map_err(
|_| de::value::Error::custom(
format!("can not parse {:?} to a {}", &self.path[0], $tp)))?;
let v = self.path[0].parse().map_err(|_| {
de::value::Error::custom(format!(
"can not parse {:?} to a {}",
&self.path[0], $tp
))
})?;
visitor.$visit_fn(v)
}
}
}
};
}
pub struct PathDeserializer<'de, T: ResourcePath + 'de> {
pub struct PathDeserializer<'de, T: ResourcePath> {
path: &'de Path<T>,
}
@@ -152,9 +161,7 @@ impl<'de, T: ResourcePath + 'de> Deserializer<'de> for PathDeserializer<'de, T>
V: Visitor<'de>,
{
if self.path.is_empty() {
Err(de::value::Error::custom(
"expeceted at least one parameters",
))
Err(de::value::Error::custom("expected at least one parameters"))
} else {
visitor.visit_enum(ValueEnum {
value: &self.path[0],
@@ -268,14 +275,15 @@ impl<'de> Deserializer<'de> for Key<'de> {
macro_rules! parse_value {
($trait_fn:ident, $visit_fn:ident, $tp:tt) => {
fn $trait_fn<V>(self, visitor: V) -> Result<V::Value, Self::Error>
where V: Visitor<'de>
where
V: Visitor<'de>,
{
let v = self.value.parse().map_err(
|_| de::value::Error::custom(
format!("can not parse {:?} to a {}", self.value, $tp)))?;
let v = self.value.parse().map_err(|_| {
de::value::Error::custom(format!("can not parse {:?} to a {}", self.value, $tp))
})?;
visitor.$visit_fn(v)
}
}
};
}
struct Value<'de> {
@@ -492,7 +500,7 @@ mod tests {
#[derive(Deserialize)]
struct Id {
id: String,
_id: String,
}
#[derive(Debug, Deserialize)]

actix-router/src/lib.rs

@@ -0,0 +1,152 @@
//! Resource path matching library.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod de;
mod path;
mod resource;
mod router;
pub use self::de::PathDeserializer;
pub use self::path::Path;
pub use self::resource::ResourceDef;
pub use self::router::{ResourceInfo, Router, RouterBuilder};
pub trait Resource<T: ResourcePath> {
fn resource_path(&mut self) -> &mut Path<T>;
}
pub trait ResourcePath {
fn path(&self) -> &str;
}
impl ResourcePath for String {
fn path(&self) -> &str {
self.as_str()
}
}
impl<'a> ResourcePath for &'a str {
fn path(&self) -> &str {
self
}
}
impl ResourcePath for bytestring::ByteString {
fn path(&self) -> &str {
&*self
}
}
/// Helper trait for types that can be converted to a path pattern.
pub trait IntoPattern {
fn is_single(&self) -> bool;
fn patterns(&self) -> Vec<String>;
}
impl IntoPattern for String {
fn is_single(&self) -> bool {
true
}
fn patterns(&self) -> Vec<String> {
vec![self.clone()]
}
}
impl<'a> IntoPattern for &'a String {
fn is_single(&self) -> bool {
true
}
fn patterns(&self) -> Vec<String> {
vec![self.as_str().to_string()]
}
}
impl<'a> IntoPattern for &'a str {
fn is_single(&self) -> bool {
true
}
fn patterns(&self) -> Vec<String> {
vec![(*self).to_string()]
}
}
impl<T: AsRef<str>> IntoPattern for Vec<T> {
fn is_single(&self) -> bool {
self.len() == 1
}
fn patterns(&self) -> Vec<String> {
self.iter().map(|v| v.as_ref().to_string()).collect()
}
}
macro_rules! array_patterns (($tp:ty, $num:tt) => {
impl IntoPattern for [$tp; $num] {
fn is_single(&self) -> bool {
$num == 1
}
fn patterns(&self) -> Vec<String> {
self.iter().map(|v| v.to_string()).collect()
}
}
});
array_patterns!(&str, 1);
array_patterns!(&str, 2);
array_patterns!(&str, 3);
array_patterns!(&str, 4);
array_patterns!(&str, 5);
array_patterns!(&str, 6);
array_patterns!(&str, 7);
array_patterns!(&str, 8);
array_patterns!(&str, 9);
array_patterns!(&str, 10);
array_patterns!(&str, 11);
array_patterns!(&str, 12);
array_patterns!(&str, 13);
array_patterns!(&str, 14);
array_patterns!(&str, 15);
array_patterns!(&str, 16);
array_patterns!(String, 1);
array_patterns!(String, 2);
array_patterns!(String, 3);
array_patterns!(String, 4);
array_patterns!(String, 5);
array_patterns!(String, 6);
array_patterns!(String, 7);
array_patterns!(String, 8);
array_patterns!(String, 9);
array_patterns!(String, 10);
array_patterns!(String, 11);
array_patterns!(String, 12);
array_patterns!(String, 13);
array_patterns!(String, 14);
array_patterns!(String, 15);
array_patterns!(String, 16);
#[cfg(feature = "http")]
mod url;
#[cfg(feature = "http")]
pub use self::url::{Quoter, Url};
#[cfg(feature = "http")]
mod http_support {
use super::ResourcePath;
use http::Uri;
impl ResourcePath for Uri {
fn path(&self) -> &str {
self.path()
}
}
}
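A quick illustration of the `IntoPattern` impls above, assuming the crate is pulled in as `actix_router` (minimal sketch):

use actix_router::IntoPattern;

fn main() {
    // A single &str is a "single" pattern wrapping itself.
    assert!("/user/{id}".is_single());
    assert_eq!("/user/{id}".patterns(), vec!["/user/{id}".to_string()]);

    // A Vec of patterns counts as "single" only when it holds exactly one element.
    let multi = vec!["/user/{id}", "/profile/{id}"];
    assert!(!multi.is_single());
    assert_eq!(multi.patterns().len(), 2);
}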


@@ -1,5 +1,4 @@
use std::ops::Index;
use std::rc::Rc;
use serde::de;
@@ -19,7 +18,7 @@ pub(crate) enum PathItem {
pub struct Path<T> {
path: T,
pub(crate) skip: u16,
pub(crate) segments: Vec<(Rc<String>, PathItem)>,
pub(crate) segments: Vec<(&'static str, PathItem)>,
}
impl<T: Default> Default for Path<T> {
@@ -96,7 +95,7 @@ impl<T: ResourcePath> Path<T> {
self.skip += n;
}
pub(crate) fn add(&mut self, name: Rc<String>, value: PathItem) {
pub(crate) fn add(&mut self, name: &'static str, value: PathItem) {
match value {
PathItem::Static(s) => self.segments.push((name, PathItem::Static(s))),
PathItem::Segment(begin, end) => self
@@ -106,9 +105,8 @@ impl<T: ResourcePath> Path<T> {
}
#[doc(hidden)]
pub fn add_static(&mut self, name: &str, value: &'static str) {
self.segments
.push((Rc::new(name.to_string()), PathItem::Static(value)));
pub fn add_static(&mut self, name: &'static str, value: &'static str) {
self.segments.push((name, PathItem::Static(value)));
}
#[inline]
@@ -126,7 +124,7 @@ impl<T: ResourcePath> Path<T> {
/// Get matched parameter by name without type conversion
pub fn get(&self, key: &str) -> Option<&str> {
for item in self.segments.iter() {
if key == item.0.as_str() {
if key == item.0 {
return match item.1 {
PathItem::Static(ref s) => Some(&s),
PathItem::Segment(s, e) => {
@@ -160,7 +158,7 @@ impl<T: ResourcePath> Path<T> {
}
/// Return iterator to items in parameter container
pub fn iter(&self) -> PathIter<T> {
pub fn iter(&self) -> PathIter<'_, T> {
PathIter {
idx: 0,
params: self,


@@ -1,11 +1,11 @@
use std::cmp::min;
use std::collections::HashMap;
use std::hash::{Hash, Hasher};
use std::rc::Rc;
use regex::{escape, Regex};
use regex::{escape, Regex, RegexSet};
use crate::path::{Path, PathItem};
use crate::{Resource, ResourcePath};
use crate::{IntoPattern, Resource, ResourcePath};
const MAX_DYNAMIC_SEGMENTS: usize = 16;
@@ -31,22 +31,56 @@ enum PatternElement {
enum PatternType {
Static(String),
Prefix(String),
Dynamic(Regex, Vec<Rc<String>>, usize),
Dynamic(Regex, Vec<&'static str>, usize),
DynamicSet(RegexSet, Vec<(Regex, Vec<&'static str>, usize)>),
}
impl ResourceDef {
/// Parse path pattern and create new `Pattern` instance.
///
/// Panics if path pattern is wrong.
pub fn new(path: &str) -> Self {
ResourceDef::with_prefix(path, false)
/// Panics if path pattern is malformed.
pub fn new<T: IntoPattern>(path: T) -> Self {
if path.is_single() {
let patterns = path.patterns();
ResourceDef::with_prefix(&patterns[0], false)
} else {
let set = path.patterns();
let mut data = Vec::new();
let mut re_set = Vec::new();
for path in set {
let (pattern, _, _, len) = ResourceDef::parse(&path, false);
let re = match Regex::new(&pattern) {
Ok(re) => re,
Err(err) => panic!("Wrong path pattern: \"{}\" {}", path, err),
};
// actix creates one router per thread
let names: Vec<_> = re
.capture_names()
.filter_map(|name| {
name.map(|name| Box::leak(Box::new(name.to_owned())).as_str())
})
.collect();
data.push((re, names, len));
re_set.push(pattern);
}
ResourceDef {
id: 0,
tp: PatternType::DynamicSet(RegexSet::new(re_set).unwrap(), data),
elements: Vec::new(),
name: String::new(),
pattern: "".to_owned(),
}
}
}
/// Parse path pattern and create new `Pattern` instance.
///
/// Use `prefix` type instead of `static`.
///
/// Panics if path regex pattern is wrong.
/// Panics if path regex pattern is malformed.
pub fn prefix(path: &str) -> Self {
ResourceDef::with_prefix(path, true)
}
@@ -57,7 +91,7 @@ impl ResourceDef {
///
/// Use `prefix` type instead of `static`.
///
/// Panics if path regex pattern is wrong.
/// Panics if path regex pattern is malformed.
pub fn root_prefix(path: &str) -> Self {
ResourceDef::with_prefix(&insert_slash(path), true)
}
@@ -85,13 +119,15 @@ impl ResourceDef {
// actix creates one router per thread
let names = re
.capture_names()
.filter_map(|name| name.map(|name| Rc::new(name.to_owned())))
.filter_map(|name| {
name.map(|name| Box::leak(Box::new(name.to_owned())).as_str())
})
.collect();
PatternType::Dynamic(re, names, len)
} else if for_prefix {
PatternType::Prefix(pattern.clone())
PatternType::Prefix(pattern)
} else {
PatternType::Static(pattern.clone())
PatternType::Static(pattern)
};
ResourceDef {
@@ -99,7 +135,7 @@ impl ResourceDef {
elements,
id: 0,
name: String::new(),
pattern: path.to_owned(),
pattern: path,
}
}
@@ -119,24 +155,25 @@ impl ResourceDef {
}
#[inline]
/// Check if path matchs this pattern?
/// Check if path matches this pattern.
pub fn is_match(&self, path: &str) -> bool {
match self.tp {
PatternType::Static(ref s) => s == path,
PatternType::Dynamic(ref re, _, _) => re.is_match(path),
PatternType::Prefix(ref s) => path.starts_with(s),
PatternType::Dynamic(ref re, _, _) => re.is_match(path),
PatternType::DynamicSet(ref re, _) => re.is_match(path),
}
}
/// Is prefix path a match against this resource?
/// Is prefix path a match against this resource.
pub fn is_prefix_match(&self, path: &str) -> Option<usize> {
let plen = path.len();
let p_len = path.len();
let path = if path.is_empty() { "/" } else { path };
match self.tp {
PatternType::Static(ref s) => {
if s == path {
Some(plen)
Some(p_len)
} else {
None
}
@@ -174,12 +211,36 @@ impl ResourceDef {
} else {
return None;
};
Some(min(plen, len))
Some(min(p_len, len))
}
PatternType::DynamicSet(ref re, ref params) => {
if let Some(idx) = re.matches(path).into_iter().next() {
let (ref pattern, _, len) = params[idx];
if let Some(captures) = pattern.captures(path) {
let mut pos = 0;
let mut passed = false;
for capture in captures.iter() {
if let Some(ref m) = capture {
if !passed {
passed = true;
continue;
}
pos = m.end();
}
}
Some(pos + len)
} else {
None
}
} else {
None
}
}
}
}
/// Is the given path and parameters a match against this pattern?
/// Is the given path and parameters a match against this pattern.
pub fn match_path<T: ResourcePath>(&self, path: &mut Path<T>) -> bool {
match self.tp {
PatternType::Static(ref s) => {
@@ -190,6 +251,25 @@ impl ResourceDef {
false
}
}
PatternType::Prefix(ref s) => {
let r_path = path.path();
let len = if s == r_path {
s.len()
} else if r_path.starts_with(s)
&& (s.ends_with('/') || r_path.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
} else {
s.len()
}
} else {
return false;
};
let r_path_len = r_path.len();
path.skip(min(r_path_len, len) as u16);
true
}
PatternType::Dynamic(ref re, ref names, len) => {
let mut idx = 0;
let mut pos = 0;
@@ -214,28 +294,45 @@ impl ResourceDef {
return false;
}
for idx in 0..idx {
path.add(names[idx].clone(), segments[idx]);
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
}
PatternType::Prefix(ref s) => {
let rpath = path.path();
let len = if s == rpath {
s.len()
} else if rpath.starts_with(s)
&& (s.ends_with('/') || rpath.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
PatternType::DynamicSet(ref re, ref params) => {
if let Some(idx) = re.matches(path.path()).into_iter().next() {
let (ref pattern, ref names, len) = params[idx];
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = pattern.captures(path.path()) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] =
PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
s.len()
return false;
}
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
} else {
return false;
};
path.skip(min(rpath.len(), len) as u16);
true
false
}
}
}
}
@@ -262,6 +359,30 @@ impl ResourceDef {
false
}
}
PatternType::Prefix(ref s) => {
let len = {
let r_path = res.resource_path().path();
if s == r_path {
s.len()
} else if r_path.starts_with(s)
&& (s.ends_with('/') || r_path.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
} else {
s.len()
}
} else {
return false;
}
};
if !check(res, user_data) {
return false;
}
let path = res.resource_path();
path.skip(min(path.path().len(), len) as u16);
true
}
PatternType::Dynamic(ref re, ref names, len) => {
let mut idx = 0;
let mut pos = 0;
@@ -292,34 +413,52 @@ impl ResourceDef {
let path = res.resource_path();
for idx in 0..idx {
path.add(names[idx].clone(), segments[idx]);
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
}
PatternType::Prefix(ref s) => {
let len = {
let rpath = res.resource_path().path();
if s == rpath {
s.len()
} else if rpath.starts_with(s)
&& (s.ends_with('/') || rpath.split_at(s.len()).1.starts_with('/'))
{
if s.ends_with('/') {
s.len() - 1
} else {
s.len()
PatternType::DynamicSet(ref re, ref params) => {
let path = res.resource_path().path();
if let Some(idx) = re.matches(path).into_iter().next() {
let (ref pattern, ref names, len) = params[idx];
let mut idx = 0;
let mut pos = 0;
let mut segments: [PathItem; MAX_DYNAMIC_SEGMENTS] =
[PathItem::Static(""); MAX_DYNAMIC_SEGMENTS];
if let Some(captures) = pattern.captures(path) {
for (no, name) in names.iter().enumerate() {
if let Some(m) = captures.name(&name) {
idx += 1;
pos = m.end();
segments[no] =
PathItem::Segment(m.start() as u16, m.end() as u16);
} else {
log::error!(
"Dynamic path match but not all segments found: {}",
name
);
return false;
}
}
} else {
return false;
}
};
if !check(res, user_data) {
return false;
if !check(res, user_data) {
return false;
}
let path = res.resource_path();
for idx in 0..idx {
path.add(names[idx], segments[idx]);
}
path.skip((pos + len) as u16);
true
} else {
false
}
let path = res.resource_path();
path.skip(min(path.path().len(), len) as u16);
true
}
}
}
@@ -347,7 +486,45 @@ impl ResourceDef {
}
}
}
};
PatternType::DynamicSet(..) => {
return false;
}
}
true
}
/// Build resource path from elements. Returns `true` on success.
pub fn resource_path_named<K, V, S>(
&self,
path: &mut String,
elements: &HashMap<K, V, S>,
) -> bool
where
K: std::borrow::Borrow<str> + Eq + Hash,
V: AsRef<str>,
S: std::hash::BuildHasher,
{
match self.tp {
PatternType::Prefix(ref p) => path.push_str(p),
PatternType::Static(ref p) => path.push_str(p),
PatternType::Dynamic(..) => {
for el in &self.elements {
match *el {
PatternElement::Str(ref s) => path.push_str(s),
PatternElement::Var(ref name) => {
if let Some(val) = elements.get(name) {
path.push_str(val.as_ref())
} else {
return false;
}
}
}
}
}
PatternType::DynamicSet(..) => {
return false;
}
}
true
}
@@ -403,6 +580,8 @@ impl ResourceDef {
mut for_prefix: bool,
) -> (String, Vec<PatternElement>, bool, usize) {
if pattern.find('{').is_none() {
// TODO: MSRV: 1.45
#[allow(clippy::manual_strip)]
return if pattern.ends_with('*') {
let path = &pattern[..pattern.len() - 1];
let re = String::from("^") + path + "(.*)";
@@ -417,39 +596,39 @@ impl ResourceDef {
};
}
let mut elems = Vec::new();
let mut elements = Vec::new();
let mut re = String::from("^");
let mut dyn_elems = 0;
let mut dyn_elements = 0;
while let Some(idx) = pattern.find('{') {
let (prefix, rem) = pattern.split_at(idx);
elems.push(PatternElement::Str(String::from(prefix)));
elements.push(PatternElement::Str(String::from(prefix)));
re.push_str(&escape(prefix));
let (param_pattern, re_part, rem, tail) = Self::parse_param(rem);
if tail {
for_prefix = true;
}
elems.push(param_pattern);
elements.push(param_pattern);
re.push_str(&re_part);
pattern = rem;
dyn_elems += 1;
dyn_elements += 1;
}
elems.push(PatternElement::Str(String::from(pattern)));
elements.push(PatternElement::Str(String::from(pattern)));
re.push_str(&escape(pattern));
if dyn_elems > MAX_DYNAMIC_SEGMENTS {
if dyn_elements > MAX_DYNAMIC_SEGMENTS {
panic!(
"Only {} dynanic segments are allowed, provided: {}",
MAX_DYNAMIC_SEGMENTS, dyn_elems
"Only {} dynamic segments are allowed, provided: {}",
MAX_DYNAMIC_SEGMENTS, dyn_elements
);
}
if !for_prefix {
re.push_str("$");
re.push('$');
}
(re, elems, true, pattern.chars().count())
(re, elements, true, pattern.chars().count())
}
}
@@ -475,7 +654,7 @@ impl<'a> From<&'a str> for ResourceDef {
impl From<String> for ResourceDef {
fn from(path: String) -> ResourceDef {
ResourceDef::new(&path)
ResourceDef::new(path)
}
}
@@ -490,7 +669,8 @@ pub(crate) fn insert_slash(path: &str) -> String {
#[cfg(test)]
mod tests {
use super::*;
use http::{HttpTryFrom, Uri};
use http::Uri;
use std::convert::TryFrom;
#[test]
fn test_parse_static() {
@@ -540,10 +720,10 @@ mod tests {
assert!(!re.is_match("/v/resource/1"));
assert!(!re.is_match("/resource"));
let mut path = Path::new("/v151/resource/adahg32");
let mut path = Path::new("/v151/resource/adage32");
assert!(re.match_path(&mut path));
assert_eq!(path.get("version").unwrap(), "151");
assert_eq!(path.get("id").unwrap(), "adahg32");
assert_eq!(path.get("id").unwrap(), "adage32");
let re = ResourceDef::new("/{id:[[:digit:]]{6}}");
assert!(re.is_match("/012345"));
@@ -556,6 +736,66 @@ mod tests {
assert_eq!(path.get("id").unwrap(), "012345");
}
#[allow(clippy::cognitive_complexity)]
#[test]
fn test_dynamic_set() {
let re = ResourceDef::new(vec![
"/user/{id}",
"/v{version}/resource/{id}",
"/{id:[[:digit:]]{6}}",
]);
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
let mut path = Path::new("/user/profile");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "profile");
let mut path = Path::new("/user/1245125");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "1245125");
assert!(re.is_match("/v1/resource/320120"));
assert!(!re.is_match("/v/resource/1"));
assert!(!re.is_match("/resource"));
let mut path = Path::new("/v151/resource/adage32");
assert!(re.match_path(&mut path));
assert_eq!(path.get("version").unwrap(), "151");
assert_eq!(path.get("id").unwrap(), "adage32");
assert!(re.is_match("/012345"));
assert!(!re.is_match("/012"));
assert!(!re.is_match("/01234567"));
assert!(!re.is_match("/XXXXXX"));
let mut path = Path::new("/012345");
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "012345");
let re = ResourceDef::new([
"/user/{id}",
"/v{version}/resource/{id}",
"/{id:[[:digit:]]{6}}",
]);
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
let re = ResourceDef::new([
"/user/{id}".to_string(),
"/v{version}/resource/{id}".to_string(),
"/{id:[[:digit:]]{6}}".to_string(),
]);
assert!(re.is_match("/user/profile"));
assert!(re.is_match("/user/2345"));
assert!(!re.is_match("/user/2345/"));
assert!(!re.is_match("/user/2345/sdg"));
}
#[test]
fn test_parse_tail() {
let re = ResourceDef::new("/user/-{id}*");
@@ -637,7 +877,7 @@ mod tests {
}
#[test]
fn test_reousrce_prefix_dynamic() {
fn test_resource_prefix_dynamic() {
let re = ResourceDef::prefix("/{name}/");
assert!(re.is_match("/name/"));
assert!(re.is_match("/name/gs"));
@@ -657,4 +897,50 @@ mod tests {
assert_eq!(&path["name"], "test2");
assert_eq!(&path[0], "test2");
}
#[test]
fn test_resource_path() {
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/test");
assert!(resource.resource_path(&mut s, &mut (&["user1"]).iter()));
assert_eq!(s, "/user/user1/test");
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/{item2}/test");
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2/test");
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/{item2}");
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2");
let mut s = String::new();
let resource = ResourceDef::new("/user/{item1}/{item2}/");
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2/");
let mut s = String::new();
assert!(!resource.resource_path(&mut s, &mut (&["item"]).iter()));
let mut s = String::new();
assert!(resource.resource_path(&mut s, &mut (&["item", "item2"]).iter()));
assert_eq!(s, "/user/item/item2/");
assert!(!resource.resource_path(&mut s, &mut (&["item"]).iter()));
let mut s = String::new();
assert!(resource.resource_path(&mut s, &mut vec!["item", "item2"].into_iter()));
assert_eq!(s, "/user/item/item2/");
let mut map = HashMap::new();
map.insert("item1", "item");
let mut s = String::new();
assert!(!resource.resource_path_named(&mut s, &map));
let mut s = String::new();
map.insert("item2", "item2");
assert!(resource.resource_path_named(&mut s, &map));
assert_eq!(s, "/user/item/item2/");
}
}
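The reworked `Prefix` arm above skips one byte less when the pattern ends with `/`, so the unprocessed remainder keeps its leading slash. A minimal sketch of that behaviour, assuming the 0.2 API used in these tests and that `Path::path()` returns the not-yet-matched remainder:

use actix_router::{Path, ResourceDef};

fn main() {
    let re = ResourceDef::prefix("/name/");

    let mut path = Path::new("/name/gs");
    assert!(re.match_path(&mut path));
    // The pattern ends with '/', so `s.len() - 1` bytes are skipped and the
    // remainder still starts with '/'.
    assert_eq!(path.path(), "/gs");
}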


@@ -1,4 +1,4 @@
use crate::{Resource, ResourceDef, ResourcePath};
use crate::{IntoPattern, Resource, ResourceDef, ResourcePath};
#[derive(Debug, Copy, Clone, PartialEq)]
pub struct ResourceId(pub u16);
@@ -70,7 +70,11 @@ pub struct RouterBuilder<T, U = ()> {
impl<T, U> RouterBuilder<T, U> {
/// Register resource for specified path.
pub fn path(&mut self, path: &str, resource: T) -> &mut (ResourceDef, T, Option<U>) {
pub fn path<P: IntoPattern>(
&mut self,
path: P,
resource: T,
) -> &mut (ResourceDef, T, Option<U>) {
self.resources
.push((ResourceDef::new(path), resource, None));
self.resources.last_mut().unwrap()
@@ -100,6 +104,7 @@ mod tests {
use crate::path::Path;
use crate::router::{ResourceId, Router};
#[allow(clippy::cognitive_complexity)]
#[test]
fn test_recognizer_1() {
let mut router = Router::<usize>::build();

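With `path()` now generic over `IntoPattern`, one handler value can be registered under several patterns in a single call. A minimal sketch, assuming the 0.2 `RouterBuilder::finish()` and `Router::recognize()` signatures rather than quoting them from this diff:

use actix_router::{Path, Router};

fn main() {
    let mut builder = Router::<usize>::build();
    builder.path(vec!["/user/{id}", "/profile/{id}"], 10);
    let router = builder.finish();

    let mut path = Path::new("/profile/2345");
    // Either pattern resolves to the same resource value.
    assert_eq!(router.recognize(&mut path).map(|(value, _id)| *value), Some(10));
    assert_eq!(path.get("id").unwrap(), "2345");
}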

@@ -31,7 +31,7 @@ fn set_bit(array: &mut [u8], ch: u8) {
}
thread_local! {
static DEFAULT_QUOTER: Quoter = { Quoter::new(b"@:", b"/+") };
static DEFAULT_QUOTER: Quoter = Quoter::new(b"@:", b"/+");
}
#[derive(Default, Clone, Debug)]
@@ -182,11 +182,11 @@ impl Quoter {
#[inline]
fn from_hex(v: u8) -> Option<u8> {
if v >= b'0' && v <= b'9' {
if (b'0'..=b'9').contains(&v) {
Some(v - 0x30) // ord('0') == 0x30
} else if v >= b'A' && v <= b'F' {
} else if (b'A'..=b'F').contains(&v) {
Some(v - 0x41 + 10) // ord('A') == 0x41
} else if v > b'a' && v <= b'f' {
} else if (b'a'..=b'f').contains(&v) {
Some(v - 0x61 + 10) // ord('a') == 0x61
} else {
None
@@ -195,12 +195,13 @@ fn from_hex(v: u8) -> Option<u8> {
#[inline]
fn restore_ch(d1: u8, d2: u8) -> Option<u8> {
from_hex(d1).and_then(|d1| from_hex(d2).and_then(move |d2| Some(d1 << 4 | d2)))
from_hex(d1).and_then(|d1| from_hex(d2).map(move |d2| d1 << 4 | d2))
}
#[cfg(test)]
mod tests {
use http::{HttpTryFrom, Uri};
use http::Uri;
use std::convert::TryFrom;
use super::*;
use crate::{Path, ResourceDef};
@@ -224,4 +225,25 @@ mod tests {
assert!(re.match_path(&mut path));
assert_eq!(path.get("id").unwrap(), "qwe%rty");
}
#[test]
fn test_from_hex() {
let hex = b"0123456789abcdefABCDEF";
for i in 0..256 {
let c = i as u8;
if hex.contains(&c) {
assert!(from_hex(c).is_some())
} else {
assert!(from_hex(c).is_none())
}
}
let expected = [
0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 10, 11, 12, 13, 14, 15,
];
for i in 0..hex.len() {
assert_eq!(from_hex(hex[i]).unwrap(), expected[i]);
}
}
}
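As a worked example of the helpers above, decoding the escape `%2F` combines the two hex nibbles into `b'/'`. The functions are private to the module, so this standalone sketch restates them verbatim:

fn from_hex(v: u8) -> Option<u8> {
    if (b'0'..=b'9').contains(&v) {
        Some(v - 0x30) // ord('0') == 0x30
    } else if (b'A'..=b'F').contains(&v) {
        Some(v - 0x41 + 10) // ord('A') == 0x41
    } else if (b'a'..=b'f').contains(&v) {
        Some(v - 0x61 + 10) // ord('a') == 0x61
    } else {
        None
    }
}

fn restore_ch(d1: u8, d2: u8) -> Option<u8> {
    from_hex(d1).and_then(|d1| from_hex(d2).map(move |d2| d1 << 4 | d2))
}

fn main() {
    // '2' -> 2 and 'F' -> 15, so 2 << 4 | 15 == 0x2F == b'/'.
    assert_eq!(restore_ch(b'2', b'F'), Some(b'/'));
    // Any non-hex digit makes the whole escape invalid.
    assert_eq!(restore_ch(b'G', b'0'), None);
}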


@@ -1,5 +1,96 @@
# Changes
## Unreleased - 2021-xx-xx
* Add `task` mod with re-export of `tokio::task::{spawn_blocking, yield_now, JoinHandle}` [#245]
[#245]: https://github.com/actix/actix-net/pull/245
## 2.0.0-beta.1 - 2020-12-28
### Added
* Add `System::attach_to_tokio` method. [#173]
### Changed
* Update `tokio` dependency to `1.0`. [#236]
* Rename `time` module `delay_for` to `sleep`, `delay_until` to `sleep_until`, `Delay` to `Sleep`
to stay aligned with Tokio's naming. [#236]
* Remove `'static` lifetime requirement for `Runtime::block_on` and `SystemRunner::block_on`.
* These methods now accept `&self` when calling. [#236]
* Remove `'static` lifetime requirement for `System::run` and `Builder::run`. [#236]
* `Arbiter::spawn` now panics when `System` is not in scope. [#207]
### Fixed
* Fix work load issue by removing `PENDING` thread local. [#207]
[#207]: https://github.com/actix/actix-net/pull/207
[#236]: https://github.com/actix/actix-net/pull/236
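Editor's sketch for two of the changes in the 2.0.0-beta.1 entry above (the `sleep` rename and the relaxed `block_on` bounds); everything beyond the listed APIs is illustrative.

```rust
use std::time::Duration;

fn main() {
    let borrowed = String::from("block_on no longer needs 'static data");
    let sys = actix_rt::System::new("beta1-demo");

    // block_on now takes &self and the future may borrow from the caller.
    sys.block_on(async {
        // delay_for/Delay became sleep/Sleep, matching Tokio's naming.
        actix_rt::time::sleep(Duration::from_millis(1)).await;
        println!("{}", borrowed);
    });
}
```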
## [1.1.1] - 2020-04-30
### Fixed
* Fix memory leak due to [#94] (see [#129] for more detail)
[#129]: https://github.com/actix/actix-net/issues/129
## [1.1.0] - 2020-04-08
**This version has been yanked.**
### Added
* Expose `System::is_set` to check if current system has been started [#99]
* Add `Arbiter::is_running` to check if event loop is running [#124]
* Add `Arbiter::local_join` associated function
to be able to `await` spawned futures [#94]
[#94]: https://github.com/actix/actix-net/pull/94
[#99]: https://github.com/actix/actix-net/pull/99
[#124]: https://github.com/actix/actix-net/pull/124
## [1.0.0] - 2019-12-11
* Update dependencies
## [1.0.0-alpha.3] - 2019-12-07
### Fixed
* Fix compilation on non-unix platforms
### Changed
* Migrate to tokio 0.2
## [1.0.0-alpha.2] - 2019-12-02
### Added
* Export `main` and `test` attribute macros
* Export `time` module (re-export of tokio-timer)
* Export `net` module (re-export of tokio-net)
## [1.0.0-alpha.1] - 2019-11-22
### Changed
* Migrate to std::future and tokio 0.2
## [0.2.6] - 2019-11-14
### Fixed
* Fix arbiter's thread panic message.
### Added
* Allow joining arbiter's thread. #60
## [0.2.5] - 2019-09-02
### Added

View File

@@ -1,27 +1,21 @@
[package]
name = "actix-rt"
version = "0.2.5"
version = "2.0.0-beta.1"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix runtime"
description = "Tokio-based single-thread async runtime for the Actix ecosystem"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-rt/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
license = "MIT OR Apache-2.0"
edition = "2018"
workspace = ".."
[lib]
name = "actix_rt"
path = "src/lib.rs"
[dependencies]
actix-threadpool = "0.1.1"
futures = "0.1.25"
tokio-current-thread = "0.1"
tokio-executor = "0.1.5"
tokio-reactor = "0.1.7"
tokio-timer = "0.2.8"
copyless = "0.1.4"
actix-macros = "0.1.0"
tokio = { version = "1", features = ["rt", "net", "parking_lot", "signal", "sync", "time"] }

View File

@@ -1,23 +1,30 @@
use std::any::{Any, TypeId};
use std::cell::{Cell, RefCell};
use std::cell::RefCell;
use std::collections::HashMap;
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicUsize, Ordering};
use std::task::{Context, Poll};
use std::{fmt, thread};
use futures::sync::mpsc::{unbounded, UnboundedReceiver, UnboundedSender};
use futures::sync::oneshot::{channel, Canceled, Sender};
use futures::{future, Async, Future, IntoFuture, Poll, Stream};
use tokio_current_thread::spawn;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::oneshot::{channel, error::RecvError as Canceled, Sender};
// use futures_util::stream::FuturesUnordered;
// use tokio::task::JoinHandle;
// use tokio::stream::StreamExt;
use tokio::task::LocalSet;
use crate::builder::Builder;
use crate::runtime::Runtime;
use crate::system::System;
use copyless::BoxHelper;
thread_local!(
static ADDR: RefCell<Option<Arbiter>> = RefCell::new(None);
static RUNNING: Cell<bool> = Cell::new(false);
static Q: RefCell<Vec<Box<dyn Future<Item = (), Error = ()>>>> = RefCell::new(Vec::new());
// TODO: Commented out code are for Arbiter::local_join function.
// It can be safely removed if this function is not used in actix-*.
//
// /// stores join handle for spawned async tasks.
// static HANDLE: RefCell<FuturesUnordered<JoinHandle<()>>> =
// RefCell::new(FuturesUnordered::new());
static STORAGE: RefCell<HashMap<TypeId, Box<dyn Any>>> = RefCell::new(HashMap::new());
);
@@ -25,12 +32,12 @@ pub(crate) static COUNT: AtomicUsize = AtomicUsize::new(0);
pub(crate) enum ArbiterCommand {
Stop,
Execute(Box<dyn Future<Item = (), Error = ()> + Send>),
Execute(Box<dyn Future<Output = ()> + Unpin + Send>),
ExecuteFn(Box<dyn FnExec>),
}
impl fmt::Debug for ArbiterCommand {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
ArbiterCommand::Stop => write!(f, "ArbiterCommand::Stop"),
ArbiterCommand::Execute(_) => write!(f, "ArbiterCommand::Execute"),
@@ -39,11 +46,20 @@ impl fmt::Debug for ArbiterCommand {
}
}
#[derive(Debug, Clone)]
#[derive(Debug)]
/// Arbiters provide an asynchronous execution environment for actors, functions
/// and futures. When an Arbiter is created, they spawn a new OS thread, and
/// host an event loop. Some Arbiter functions execute on the current thread.
pub struct Arbiter(UnboundedSender<ArbiterCommand>);
/// and futures. When an Arbiter is created, it spawns a new OS thread, and
/// hosts an event loop. Some Arbiter functions execute on the current thread.
pub struct Arbiter {
sender: UnboundedSender<ArbiterCommand>,
thread_handle: Option<thread::JoinHandle<()>>,
}
impl Clone for Arbiter {
fn clone(&self) -> Self {
Self::with_sender(self.sender.clone())
}
}
impl Default for Arbiter {
fn default() -> Self {
@@ -52,14 +68,14 @@ impl Default for Arbiter {
}
impl Arbiter {
pub(crate) fn new_system() -> Self {
let (tx, rx) = unbounded();
pub(crate) fn new_system(local: &LocalSet) -> Self {
let (tx, rx) = unbounded_channel();
let arb = Arbiter(tx);
let arb = Arbiter::with_sender(tx);
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
RUNNING.with(|cell| cell.set(false));
STORAGE.with(|cell| cell.borrow_mut().clear());
Arbiter::spawn(ArbiterController { stop: None, rx });
local.spawn_local(ArbiterController { rx });
arb
}
@@ -73,9 +89,15 @@ impl Arbiter {
})
}
/// Check if current arbiter is running.
#[deprecated(note = "Thread local variables for running state of Arbiter is removed")]
pub fn is_running() -> bool {
false
}
/// Stop arbiter from continuing its event loop.
pub fn stop(&self) {
let _ = self.0.unbounded_send(ArbiterCommand::Stop);
let _ = self.sender.send(ArbiterCommand::Stop);
}
/// Spawn new thread and run event loop in spawned thread.
@@ -84,58 +106,45 @@ impl Arbiter {
let id = COUNT.fetch_add(1, Ordering::Relaxed);
let name = format!("actix-rt:worker:{}", id);
let sys = System::current();
let (arb_tx, arb_rx) = unbounded();
let arb_tx2 = arb_tx.clone();
let (tx, rx) = unbounded_channel();
let _ = thread::Builder::new().name(name.clone()).spawn(move || {
let mut rt = Builder::new().build_rt().expect("Can not create Runtime");
let arb = Arbiter(arb_tx);
let handle = thread::Builder::new()
.name(name.clone())
.spawn({
let tx = tx.clone();
move || {
let rt = Runtime::new().expect("Can not create Runtime");
let arb = Arbiter::with_sender(tx);
let (stop, stop_rx) = channel();
RUNNING.with(|cell| cell.set(true));
STORAGE.with(|cell| cell.borrow_mut().clear());
STORAGE.with(|cell| cell.borrow_mut().clear());
System::set_current(sys);
System::set_current(sys);
// start arbiter controller
rt.spawn(ArbiterController {
stop: Some(stop),
rx: arb_rx,
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
// register arbiter
let _ = System::current()
.sys()
.send(SystemCommand::RegisterArbiter(id, arb));
// start arbiter controller
// run loop
rt.block_on(ArbiterController { rx });
// unregister arbiter
let _ = System::current()
.sys()
.send(SystemCommand::UnregisterArbiter(id));
}
})
.unwrap_or_else(|err| {
panic!("Cannot spawn an arbiter's thread {:?}: {:?}", &name, err)
});
ADDR.with(|cell| *cell.borrow_mut() = Some(arb.clone()));
// register arbiter
let _ = System::current()
.sys()
.unbounded_send(SystemCommand::RegisterArbiter(id, arb.clone()));
// run loop
let _ = match rt.block_on(stop_rx) {
Ok(code) => code,
Err(_) => 1,
};
// unregister arbiter
let _ = System::current()
.sys()
.unbounded_send(SystemCommand::UnregisterArbiter(id));
});
Arbiter(arb_tx2)
}
pub(crate) fn run_system() {
RUNNING.with(|cell| cell.set(true));
Q.with(|cell| {
let mut v = cell.borrow_mut();
for fut in v.drain(..) {
spawn(fut);
}
});
}
pub(crate) fn stop_system() {
RUNNING.with(|cell| cell.set(false));
Arbiter {
sender: tx,
thread_handle: Some(handle),
}
}
/// Spawn a future on the current thread. This does not create a new Arbiter
@@ -143,15 +152,14 @@ impl Arbiter {
/// thread.
pub fn spawn<F>(future: F)
where
F: Future<Item = (), Error = ()> + 'static,
F: Future<Output = ()> + 'static,
{
RUNNING.with(move |cell| {
if cell.get() {
spawn(Box::alloc().init(future));
} else {
Q.with(move |cell| cell.borrow_mut().push(Box::alloc().init(future)));
}
});
// HANDLE.with(|handle| {
// let handle = handle.borrow();
// handle.push(tokio::task::spawn_local(future));
// });
// let _ = tokio::task::spawn_local(CleanupPending);
let _ = tokio::task::spawn_local(future);
}
/// Executes a future on the current thread. This does not create a new Arbiter
@@ -160,19 +168,19 @@ impl Arbiter {
pub fn spawn_fn<F, R>(f: F)
where
F: FnOnce() -> R + 'static,
R: IntoFuture<Item = (), Error = ()> + 'static,
R: Future<Output = ()> + 'static,
{
Arbiter::spawn(future::lazy(f))
Arbiter::spawn(async {
f();
})
}
/// Send a future to the Arbiter's thread, and spawn it.
pub fn send<F>(&self, future: F)
where
F: Future<Item = (), Error = ()> + Send + 'static,
F: Future<Output = ()> + Send + Unpin + 'static,
{
let _ = self
.0
.unbounded_send(ArbiterCommand::Execute(Box::new(future)));
let _ = self.sender.send(ArbiterCommand::Execute(Box::new(future)));
}
/// Send a function to the Arbiter's thread, and execute it. Any result from the function
@@ -182,8 +190,8 @@ impl Arbiter {
F: FnOnce() + Send + 'static,
{
let _ = self
.0
.unbounded_send(ArbiterCommand::ExecuteFn(Box::new(move || {
.sender
.send(ArbiterCommand::ExecuteFn(Box::new(move || {
f();
})));
}
@@ -191,16 +199,16 @@ impl Arbiter {
/// Send a function to the Arbiter's thread. This function will be executed asynchronously.
/// A future is created, and when resolved will contain the result of the function sent
/// to the Arbiter's thread.
pub fn exec<F, R>(&self, f: F) -> impl Future<Item = R, Error = Canceled>
pub fn exec<F, R>(&self, f: F) -> impl Future<Output = Result<R, Canceled>>
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
let (tx, rx) = channel();
let _ = self
.0
.unbounded_send(ArbiterCommand::ExecuteFn(Box::new(move || {
if !tx.is_canceled() {
.sender
.send(ArbiterCommand::ExecuteFn(Box::new(move || {
if !tx.is_closed() {
let _ = tx.send(f());
}
})));
@@ -250,52 +258,109 @@ impl Arbiter {
f(item)
})
}
fn with_sender(sender: UnboundedSender<ArbiterCommand>) -> Self {
Self {
sender,
thread_handle: None,
}
}
/// Wait for the event loop to stop by joining the underlying thread (if one exists).
pub fn join(&mut self) -> thread::Result<()> {
if let Some(thread_handle) = self.thread_handle.take() {
thread_handle.join()
} else {
Ok(())
}
}
/// Returns a future that will be completed once all currently spawned futures
/// have completed.
#[deprecated(since = "1.2.0", note = "Arbiter::local_join function is removed.")]
pub async fn local_join() {
// let handle = HANDLE.with(|fut| std::mem::take(&mut *fut.borrow_mut()));
// async move {
// handle.collect::<Vec<_>>().await;
// }
unimplemented!("Arbiter::local_join function is removed.")
}
}
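Editorial sketch, not part of the diff: the rough shape of the reworked `Arbiter` API from the hunks above (`new`, `exec`, `stop`, `join`), assuming it runs inside an actix `System`; the system name and arithmetic are illustrative.

```rust
fn main() {
    actix_rt::System::new("arbiter-demo").block_on(async {
        let mut arbiter = actix_rt::Arbiter::new();

        // Run a closure on the arbiter's thread and await its result.
        let answer = arbiter.exec(|| 40 + 2).await.unwrap();
        assert_eq!(answer, 42);

        // Ask the arbiter to stop, then join its OS thread.
        arbiter.stop();
        arbiter.join().unwrap();
    });
}
```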
// /// Future used for cleaning-up already finished `JoinHandle`s
// /// from the `PENDING` list so the vector doesn't grow indefinitely
// struct CleanupPending;
//
// impl Future for CleanupPending {
// type Output = ();
//
// fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// HANDLE.with(move |handle| {
// recycle_join_handle(&mut *handle.borrow_mut(), cx);
// });
//
// Poll::Ready(())
// }
// }
struct ArbiterController {
stop: Option<Sender<i32>>,
rx: UnboundedReceiver<ArbiterCommand>,
}
impl Drop for ArbiterController {
fn drop(&mut self) {
if thread::panicking() {
eprintln!("Panic in Arbiter thread, shutting down system.");
if System::current().stop_on_panic() {
eprintln!("Panic in Arbiter thread, shutting down system.");
System::current().stop_with_code(1)
} else {
eprintln!("Panic in Arbiter thread.");
}
}
}
}
impl Future for ArbiterController {
type Item = ();
type Error = ();
type Output = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
match self.rx.poll() {
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
Ok(Async::Ready(Some(item))) => match item {
ArbiterCommand::Stop => {
if let Some(stop) = self.stop.take() {
let _ = stop.send(0);
};
return Ok(Async::Ready(()));
}
match Pin::new(&mut self.rx).poll_recv(cx) {
Poll::Ready(None) => return Poll::Ready(()),
Poll::Ready(Some(item)) => match item {
ArbiterCommand::Stop => return Poll::Ready(()),
ArbiterCommand::Execute(fut) => {
spawn(fut);
// HANDLE.with(|handle| {
// let mut handle = handle.borrow_mut();
// handle.push(tokio::task::spawn_local(fut));
// recycle_join_handle(&mut *handle, cx);
// });
tokio::task::spawn_local(fut);
}
ArbiterCommand::ExecuteFn(f) => {
f.call_box();
}
},
Ok(Async::NotReady) => return Ok(Async::NotReady),
Poll::Pending => return Poll::Pending,
}
}
}
}
// fn recycle_join_handle(handle: &mut FuturesUnordered<JoinHandle<()>>, cx: &mut Context<'_>) {
// let _ = Pin::new(&mut *handle).poll_next(cx);
//
// // Try to recycle more join handles and free up memory.
// //
// // this is a guess. The yield limit for FuturesUnordered is 32.
// // So poll an extra 3 times would make the total poll below 128.
// if handle.len() > 64 {
// (0..3).for_each(|_| {
// let _ = Pin::new(&mut *handle).poll_next(cx);
// })
// }
// }
#[derive(Debug)]
pub(crate) enum SystemCommand {
Exit(i32),
@@ -321,14 +386,13 @@ impl SystemArbiter {
}
impl Future for SystemArbiter {
type Item = ();
type Error = ();
type Output = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
match self.commands.poll() {
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
Ok(Async::Ready(Some(cmd))) => match cmd {
match Pin::new(&mut self.commands).poll_recv(cx) {
Poll::Ready(None) => return Poll::Ready(()),
Poll::Ready(Some(cmd)) => match cmd {
SystemCommand::Exit(code) => {
// stop arbiters
for arb in self.arbiters.values() {
@@ -346,7 +410,7 @@ impl Future for SystemArbiter {
self.arbiters.remove(&name);
}
},
Ok(Async::NotReady) => return Ok(Async::NotReady),
Poll::Pending => return Poll::Pending,
}
}
}

View File

@@ -1,15 +1,10 @@
use std::borrow::Cow;
use std::future::Future;
use std::io;
use futures::future;
use futures::future::{lazy, Future};
use futures::sync::mpsc::unbounded;
use futures::sync::oneshot::{channel, Receiver};
use tokio_current_thread::{CurrentThread, Handle};
use tokio_reactor::Reactor;
use tokio_timer::clock::Clock;
use tokio_timer::timer::Timer;
use tokio::sync::mpsc::unbounded_channel;
use tokio::sync::oneshot::{channel, Receiver};
use tokio::task::LocalSet;
use crate::arbiter::{Arbiter, SystemArbiter};
use crate::runtime::Runtime;
@@ -24,9 +19,6 @@ pub struct Builder {
/// Name of the System. Defaults to "actix" if unset.
name: Cow<'static, str>,
/// The clock to use
clock: Clock,
/// Whether the Arbiter will stop the whole System on uncaught panic. Defaults to false.
stop_on_panic: bool,
}
@@ -35,7 +27,6 @@ impl Builder {
pub(crate) fn new() -> Self {
Builder {
name: Cow::Borrowed("actix"),
clock: Clock::new(),
stop_on_panic: false,
}
}
@@ -46,14 +37,6 @@ impl Builder {
self
}
/// Set the Clock instance that will be used by this System.
///
/// Defaults to the system clock.
pub fn clock(mut self, clock: Clock) -> Self {
self.clock = clock;
self
}
/// Sets the option 'stop_on_panic' which controls whether the System is stopped when an
/// uncaught panic is thrown from a worker thread.
///
@@ -73,8 +56,8 @@ impl Builder {
/// Create new System that can run asynchronously.
///
/// This method panics if it cannot start the system arbiter
pub(crate) fn build_async(self, executor: Handle) -> AsyncSystemRunner {
self.create_async_runtime(executor)
pub(crate) fn build_async(self, local: &LocalSet) -> AsyncSystemRunner {
self.create_async_runtime(local)
}
/// This function will start tokio runtime and will finish once the
@@ -82,72 +65,52 @@ impl Builder {
/// Function `f` gets called within the tokio runtime context.
pub fn run<F>(self, f: F) -> io::Result<()>
where
F: FnOnce() + 'static,
F: FnOnce(),
{
self.create_runtime(f).run()
}
fn create_async_runtime(self, executor: Handle) -> AsyncSystemRunner {
fn create_async_runtime(self, local: &LocalSet) -> AsyncSystemRunner {
let (stop_tx, stop) = channel();
let (sys_sender, sys_receiver) = unbounded();
let (sys_sender, sys_receiver) = unbounded_channel();
let system = System::construct(sys_sender, Arbiter::new_system(), self.stop_on_panic);
let system =
System::construct(sys_sender, Arbiter::new_system(local), self.stop_on_panic);
// system arbiter
let arb = SystemArbiter::new(stop_tx, sys_receiver);
// start the system arbiter
executor.spawn(arb).expect("could not start system arbiter");
let _ = local.spawn_local(arb);
AsyncSystemRunner { stop, system }
}
fn create_runtime<F>(self, f: F) -> SystemRunner
where
F: FnOnce() + 'static,
F: FnOnce(),
{
let (stop_tx, stop) = channel();
let (sys_sender, sys_receiver) = unbounded();
let (sys_sender, sys_receiver) = unbounded_channel();
let system = System::construct(sys_sender, Arbiter::new_system(), self.stop_on_panic);
let rt = Runtime::new().unwrap();
let system = System::construct(
sys_sender,
Arbiter::new_system(rt.local()),
self.stop_on_panic,
);
// system arbiter
let arb = SystemArbiter::new(stop_tx, sys_receiver);
let mut rt = self.build_rt().unwrap();
rt.spawn(arb);
// init system arbiter and run configuration method
let _ = rt.block_on(lazy(move || {
f();
Ok::<_, ()>(())
}));
rt.block_on(async { f() });
SystemRunner { rt, stop, system }
}
pub(crate) fn build_rt(&self) -> io::Result<Runtime> {
// We need a reactor to receive events about IO objects from kernel
let reactor = Reactor::new()?;
let reactor_handle = reactor.handle();
// Place a timer wheel on top of the reactor. If there are no timeouts to fire, it'll let the
// reactor pick up some new external events.
let timer = Timer::new_with_now(reactor, self.clock.clone());
let timer_handle = timer.handle();
// And now put a single-threaded executor on top of the timer. When there are no futures ready
// to do something, it'll let the timer or the reactor to generate some new stimuli for the
// futures to continue in their life.
let executor = CurrentThread::new_with_park(timer);
Ok(Runtime::new2(
reactor_handle,
timer_handle,
self.clock.clone(),
executor,
))
}
}
#[derive(Debug)]
@@ -159,13 +122,12 @@ pub(crate) struct AsyncSystemRunner {
impl AsyncSystemRunner {
/// This function will start event loop and returns a future that
/// resolves once the `System::stop()` function is called.
pub(crate) fn run_nonblocking(self) -> impl Future<Item = (), Error = io::Error> + Send {
pub(crate) fn run_nonblocking(self) -> impl Future<Output = Result<(), io::Error>> + Send {
let AsyncSystemRunner { stop, .. } = self;
// run loop
future::lazy(|| {
Arbiter::run_system();
stop.then(|res| match res {
async {
match stop.await {
Ok(code) => {
if code != 0 {
Err(io::Error::new(
@@ -177,12 +139,8 @@ impl AsyncSystemRunner {
}
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
})
.then(|result| {
Arbiter::stop_system();
result
})
})
}
}
}
}
@@ -199,14 +157,10 @@ impl SystemRunner {
/// This function will start event loop and will finish once the
/// `System::stop()` function is called.
pub fn run(self) -> io::Result<()> {
let SystemRunner { mut rt, stop, .. } = self;
let SystemRunner { rt, stop, .. } = self;
// run loop
let _ = rt.block_on(lazy(move || {
Arbiter::run_system();
Ok::<_, ()>(())
}));
let result = match rt.block_on(stop) {
match rt.block_on(stop) {
Ok(code) => {
if code != 0 {
Err(io::Error::new(
@@ -218,25 +172,12 @@ impl SystemRunner {
}
}
Err(e) => Err(io::Error::new(io::ErrorKind::Other, e)),
};
Arbiter::stop_system();
result
}
}
/// Execute a future and wait for result.
pub fn block_on<F, I, E>(&mut self, fut: F) -> Result<I, E>
where
F: Future<Item = I, Error = E>,
{
let _ = self.rt.block_on(lazy(move || {
Arbiter::run_system();
Ok::<_, ()>(())
}));
let res = self.rt.block_on(fut);
let _ = self.rt.block_on(lazy(move || {
Arbiter::stop_system();
Ok::<_, ()>(())
}));
res
#[inline]
pub fn block_on<F: Future>(&self, fut: F) -> F::Output {
self.rt.block_on(fut)
}
}

View File

@@ -1,4 +1,14 @@
//! A runtime implementation that runs everything on the current thread.
//! Tokio-based single-thread async runtime for the Actix ecosystem.
#![deny(rust_2018_idioms, nonstandard_style)]
#![allow(clippy::type_complexity)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
use std::future::Future;
#[cfg(not(test))] // Work around for rust-lang/rust#62127
pub use actix_macros::{main, test};
mod arbiter;
mod builder;
@@ -10,21 +20,51 @@ pub use self::builder::{Builder, SystemRunner};
pub use self::runtime::Runtime;
pub use self::system::System;
#[doc(hidden)]
pub use actix_threadpool as blocking;
/// Spawns a future on the current arbiter.
///
/// # Panics
///
/// This function panics if actix system is not running.
#[inline]
pub fn spawn<F>(f: F)
where
F: futures::Future<Item = (), Error = ()> + 'static,
F: Future<Output = ()> + 'static,
{
if !System::is_set() {
panic!("System is not running");
Arbiter::spawn(f)
}
/// Asynchronous signal handling
pub mod signal {
#[cfg(unix)]
pub mod unix {
pub use tokio::signal::unix::*;
}
pub use tokio::signal::ctrl_c;
}
/// TCP/UDP/Unix bindings
pub mod net {
pub use tokio::net::UdpSocket;
pub use tokio::net::{TcpListener, TcpStream};
#[cfg(unix)]
mod unix {
pub use tokio::net::{UnixDatagram, UnixListener, UnixStream};
}
Arbiter::spawn(f);
#[cfg(unix)]
pub use self::unix::*;
}
/// Utilities for tracking time.
pub mod time {
pub use tokio::time::Instant;
pub use tokio::time::{interval, interval_at, Interval};
pub use tokio::time::{sleep, sleep_until, Sleep};
pub use tokio::time::{timeout, Timeout};
}
/// Task management.
pub mod task {
pub use tokio::task::{spawn_blocking, yield_now, JoinHandle};
}
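Editorial sketch showing the re-exported `time` and `net` modules from the lib.rs diff above; the address and timeout value are illustrative, and the connection is expected to fail on most machines.

```rust
use std::time::Duration;

fn main() {
    actix_rt::System::new("net-demo").block_on(async {
        let connect = actix_rt::net::TcpStream::connect("127.0.0.1:8080");

        // timeout wraps any future with a deadline, as re-exported from Tokio.
        match actix_rt::time::timeout(Duration::from_secs(1), connect).await {
            Ok(Ok(_stream)) => println!("connected"),
            Ok(Err(err)) => println!("connect error: {}", err),
            Err(_elapsed) => println!("connect timed out"),
        }
    });
}
```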

View File

@@ -1,92 +0,0 @@
//! A runtime implementation that runs everything on the current thread.
//!
//! [`current_thread::Runtime`][rt] is similar to the primary
//! [`Runtime`][concurrent-rt] except that it runs all components on the current
//! thread instead of using a thread pool. This means that it is able to spawn
//! futures that do not implement `Send`.
//!
//! Same as the default [`Runtime`][concurrent-rt], the
//! [`current_thread::Runtime`][rt] includes:
//!
//! * A [reactor] to drive I/O resources.
//! * An [executor] to execute tasks that use these I/O resources.
//! * A [timer] for scheduling work to run after a set period of time.
//!
//! Note that [`current_thread::Runtime`][rt] does not implement `Send` itself
//! and cannot be safely moved to other threads.
//!
//! # Spawning from other threads
//!
//! While [`current_thread::Runtime`][rt] does not implement `Send` and cannot
//! safely be moved to other threads, it provides a `Handle` that can be sent
//! to other threads and allows to spawn new tasks from there.
//!
//! For example:
//!
//! ```
//! # extern crate tokio;
//! # extern crate futures;
//! use tokio::runtime::current_thread::Runtime;
//! use tokio::prelude::*;
//! use std::thread;
//!
//! # fn main() {
//! let mut runtime = Runtime::new().unwrap();
//! let handle = runtime.handle();
//!
//! thread::spawn(move || {
//! handle.spawn(future::ok(()));
//! }).join().unwrap();
//!
//! # /*
//! runtime.run().unwrap();
//! # */
//! # }
//! ```
//!
//! # Examples
//!
//! Creating a new `Runtime` and running a future `f` until its completion and
//! returning its result.
//!
//! ```
//! use tokio::runtime::current_thread::Runtime;
//! use tokio::prelude::*;
//!
//! let mut runtime = Runtime::new().unwrap();
//!
//! // Use the runtime...
//! // runtime.block_on(f); // where f is a future
//! ```
//!
//! [rt]: struct.Runtime.html
//! [concurrent-rt]: ../struct.Runtime.html
//! [chan]: https://docs.rs/futures/0.1/futures/sync/mpsc/fn.channel.html
//! [reactor]: ../../reactor/struct.Reactor.html
//! [executor]: https://tokio.rs/docs/getting-started/runtime-model/#executors
//! [timer]: ../../timer/index.html
mod builder;
mod runtime;
pub use self::builder::Builder;
pub use self::runtime::{Runtime, Handle};
pub use tokio_current_thread::spawn;
pub use tokio_current_thread::TaskExecutor;
use futures::Future;
/// Run the provided future to completion using a runtime running on the current thread.
///
/// This first creates a new [`Runtime`], and calls [`Runtime::block_on`] with the provided future,
/// which blocks the current thread until the provided future completes. It then calls
/// [`Runtime::run`] to wait for any other spawned futures to resolve.
pub fn block_on_all<F>(future: F) -> Result<F::Item, F::Error>
where
F: Future,
{
let mut r = Runtime::new().expect("failed to start runtime on current thread");
let v = r.block_on(future)?;
r.run().expect("failed to resolve remaining futures");
Ok(v)
}

View File

@@ -1,91 +1,57 @@
use std::error::Error;
use std::{fmt, io};
use futures::Future;
use tokio_current_thread::{self as current_thread, CurrentThread};
use tokio_executor;
use tokio_reactor::{self, Reactor};
use tokio_timer::clock::{self, Clock};
use tokio_timer::timer::{self, Timer};
use crate::builder::Builder;
use std::future::Future;
use std::io;
use tokio::{runtime, task::LocalSet};
/// Single-threaded runtime provides a way to start reactor
/// and executor on the current thread.
/// and runtime on the current thread.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
/// [mod]: crate
#[derive(Debug)]
pub struct Runtime {
reactor_handle: tokio_reactor::Handle,
timer_handle: timer::Handle,
clock: Clock,
executor: CurrentThread<Timer<Reactor>>,
}
/// Error returned by the `run` function.
#[derive(Debug)]
pub struct RunError {
inner: current_thread::RunError,
}
impl fmt::Display for RunError {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
write!(fmt, "{}", self.inner)
}
}
impl Error for RunError {
fn description(&self) -> &str {
self.inner.description()
}
fn cause(&self) -> Option<&dyn Error> {
self.inner.source()
}
local: LocalSet,
rt: runtime::Runtime,
}
impl Runtime {
#[allow(clippy::new_ret_no_self)]
/// Returns a new runtime initialized with default configuration values.
pub fn new() -> io::Result<Runtime> {
Builder::new().build_rt()
let rt = runtime::Builder::new_current_thread()
.enable_io()
.enable_time()
.build()?;
Ok(Runtime {
rt,
local: LocalSet::new(),
})
}
pub(super) fn new2(
reactor_handle: tokio_reactor::Handle,
timer_handle: timer::Handle,
clock: Clock,
executor: CurrentThread<Timer<Reactor>>,
) -> Runtime {
Runtime {
reactor_handle,
timer_handle,
clock,
executor,
}
pub(super) fn local(&self) -> &LocalSet {
&self.local
}
/// Spawn a future onto the single-threaded Tokio runtime.
/// Spawn a future onto the single-threaded runtime.
///
/// See [module level][mod] documentation for more details.
///
/// [mod]: index.html
/// [mod]: crate
///
/// # Examples
///
/// ```rust
/// ```ignore
/// # use futures::{future, Future, Stream};
/// use actix_rt::Runtime;
///
/// # fn dox() {
/// // Create the runtime
/// let mut rt = Runtime::new().unwrap();
/// let rt = Runtime::new().unwrap();
///
/// // Spawn a future onto the runtime
/// rt.spawn(future::lazy(|| {
/// rt.spawn(future::lazy(|_| {
/// println!("running on the runtime");
/// Ok(())
/// }));
/// # }
/// # pub fn main() {}
@@ -95,11 +61,11 @@ impl Runtime {
///
/// This function panics if the spawn fails. Failure occurs if the executor
/// is currently at capacity and is unable to spawn a new future.
pub fn spawn<F>(&mut self, future: F) -> &mut Self
pub fn spawn<F>(&self, future: F) -> &Self
where
F: Future<Item = (), Error = ()> + 'static,
F: Future<Output = ()> + 'static,
{
self.executor.spawn(future);
self.local.spawn_local(future);
self
}
@@ -119,56 +85,10 @@ impl Runtime {
///
/// The caller is responsible for ensuring that other spawned futures
/// complete execution by calling `block_on` or `run`.
pub fn block_on<F>(&mut self, f: F) -> Result<F::Item, F::Error>
pub fn block_on<F>(&self, f: F) -> F::Output
where
F: Future,
{
self.enter(|executor| {
// Run the provided future
let ret = executor.block_on(f);
ret.map_err(|e| e.into_inner().expect("unexpected execution error"))
})
}
/// Run the executor to completion, blocking the thread until **all**
/// spawned futures have completed.
pub fn run(&mut self) -> Result<(), RunError> {
self.enter(|executor| executor.run())
.map_err(|e| RunError { inner: e })
}
fn enter<F, R>(&mut self, f: F) -> R
where
F: FnOnce(&mut current_thread::Entered<Timer<Reactor>>) -> R,
{
let Runtime {
ref reactor_handle,
ref timer_handle,
ref clock,
ref mut executor,
..
} = *self;
// Binds an executor to this thread
let mut enter = tokio_executor::enter().expect("Multiple executors at once");
// This will set the default handle and timer to use inside the closure
// and run the future.
tokio_reactor::with_default(&reactor_handle, &mut enter, |enter| {
clock::with_default(clock, enter, |enter| {
timer::with_default(&timer_handle, enter, |enter| {
// The TaskExecutor is a fake executor that looks into the
// current single-threaded executor when used. This is a trick,
// because we need two mutable references to the executor (one
// to run the provided future, another to install as the default
// one). We use the fake one here as the default one.
let mut default_executor = current_thread::TaskExecutor::current();
tokio_executor::with_default(&mut default_executor, enter, |enter| {
let mut executor = executor.enter(enter);
f(&mut executor)
})
})
})
})
self.local.block_on(&self.rt, f)
}
}
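Editorial sketch of the rewritten `Runtime` above (a `LocalSet` on top of a current-thread Tokio runtime), assuming the `spawn`/`block_on` signatures shown in this diff.

```rust
use std::time::Duration;

fn main() {
    let rt = actix_rt::Runtime::new().expect("failed to build runtime");

    // Futures spawned here go onto the LocalSet, so they may be !Send.
    rt.spawn(async {
        println!("running on the single-threaded runtime");
    });

    // block_on drives the given future and, while it runs, any tasks
    // previously spawned onto the LocalSet.
    rt.block_on(async {
        actix_rt::time::sleep(Duration::from_millis(1)).await;
    });
}
```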

View File

@@ -1,10 +1,10 @@
use std::cell::RefCell;
use std::future::Future;
use std::io;
use std::sync::atomic::{AtomicUsize, Ordering};
use futures::sync::mpsc::UnboundedSender;
use futures::Future;
use tokio_current_thread::Handle;
use tokio::sync::mpsc::UnboundedSender;
use tokio::task::LocalSet;
use crate::arbiter::{Arbiter, SystemCommand};
use crate::builder::{Builder, SystemRunner};
@@ -57,20 +57,136 @@ impl System {
Self::builder().name(name).build()
}
#[allow(clippy::new_ret_no_self)]
/// Create new system using provided CurrentThread Handle.
/// Create new system using provided tokio `LocalSet`.
///
/// This method panics if it cannot spawn the system arbiter
pub fn run_in_executor<T: Into<String>>(
///
/// Note: This method uses provided `LocalSet` to create a `System` future only.
/// All the [`Arbiter`]s will be started in separate threads using their own tokio `Runtime`s.
/// This means that, when using this method, it is currently impossible to make `actix-rt` work
/// inside alternative `tokio` `Runtime`s (e.g. the one provided by [`tokio_compat`]).
///
/// [`tokio_compat`]: https://crates.io/crates/tokio-compat
///
/// # Examples
///
/// ```ignore
/// use tokio::{runtime::Runtime, task::LocalSet};
/// use actix_rt::System;
/// use futures_util::future::try_join_all;
///
/// async fn run_application() {
/// let first_task = tokio::spawn(async {
/// // ...
/// # println!("One task");
/// # Ok::<(),()>(())
/// });
///
/// let second_task = tokio::spawn(async {
/// // ...
/// # println!("Another task");
/// # Ok::<(),()>(())
/// });
///
/// try_join_all(vec![first_task, second_task])
/// .await
/// .expect("Some of the futures finished unexpectedly");
/// }
///
///
/// let runtime = tokio::runtime::Builder::new_multi_thread()
/// .worker_threads(2)
/// .enable_all()
/// .build()
/// .unwrap();
///
///
/// let actix_system_task = LocalSet::new();
/// let sys = System::run_in_tokio("actix-main-system", &actix_system_task);
/// actix_system_task.spawn_local(sys);
///
/// let rest_operations = run_application();
/// runtime.block_on(actix_system_task.run_until(rest_operations));
/// ```
pub fn run_in_tokio<T: Into<String>>(
name: T,
executor: Handle,
) -> impl Future<Item = (), Error = io::Error> + Send {
local: &LocalSet,
) -> impl Future<Output = io::Result<()>> {
Self::builder()
.name(name)
.build_async(executor)
.build_async(local)
.run_nonblocking()
}
/// Consume the provided tokio Runtime and start the `System` in it.
/// This method will create a `LocalSet` object and occupy the current thread
/// for the created `System` exclusively. All the other asynchronous tasks that
/// should be executed as well must be aggregated into one future, provided as the last
/// argument to this method.
///
/// Note: This method uses provided `Runtime` to create a `System` future only.
/// All the [`Arbiter`]s will be started in separate threads using their own tokio `Runtime`s.
/// This means that, when using this method, it is currently impossible to make `actix-rt` work
/// inside alternative `tokio` `Runtime`s (e.g. the one provided by `tokio_compat`).
///
/// [`tokio_compat`]: https://crates.io/crates/tokio-compat
///
/// # Arguments
///
/// - `name`: Name of the System
/// - `runtime`: A tokio Runtime to run the system in.
/// - `rest_operations`: A future to be executed in the runtime along with the System.
///
/// # Examples
///
/// ```ignore
/// use tokio::runtime::Runtime;
/// use actix_rt::System;
/// use futures_util::future::try_join_all;
///
/// async fn run_application() {
/// let first_task = tokio::spawn(async {
/// // ...
/// # println!("One task");
/// # Ok::<(),()>(())
/// });
///
/// let second_task = tokio::spawn(async {
/// // ...
/// # println!("Another task");
/// # Ok::<(),()>(())
/// });
///
/// try_join_all(vec![first_task, second_task])
/// .await
/// .expect("Some of the futures finished unexpectedly");
/// }
///
///
/// let runtime = tokio::runtime::Builder::new_multi_thread()
/// .worker_threads(2)
/// .enable_all()
/// .build()
/// .unwrap();
///
/// let rest_operations = run_application();
/// System::attach_to_tokio("actix-main-system", runtime, rest_operations);
/// ```
pub fn attach_to_tokio<Fut, R>(
name: impl Into<String>,
runtime: tokio::runtime::Runtime,
rest_operations: Fut,
) -> R
where
Fut: std::future::Future<Output = R>,
{
let actix_system_task = LocalSet::new();
let sys = System::run_in_tokio(name.into(), &actix_system_task);
actix_system_task.spawn_local(sys);
runtime.block_on(actix_system_task.run_until(rest_operations))
}
/// Get current running system.
pub fn current() -> System {
CURRENT.with(|cell| match *cell.borrow() {
@@ -79,8 +195,8 @@ impl System {
})
}
/// Set current running system.
pub(crate) fn is_set() -> bool {
/// Check if the current system is set, i.e., has already been started.
pub fn is_set() -> bool {
CURRENT.with(|cell| cell.borrow().is_some())
}
@@ -115,7 +231,7 @@ impl System {
/// Stop the system with a particular exit code.
pub fn stop_with_code(&self, code: i32) {
let _ = self.sys.unbounded_send(SystemCommand::Exit(code));
let _ = self.sys.send(SystemCommand::Exit(code));
}
pub(crate) fn sys(&self) -> &UnboundedSender<SystemCommand> {
@@ -138,7 +254,7 @@ impl System {
/// Function `f` gets called within the tokio runtime context.
pub fn run<F>(f: F) -> io::Result<()>
where
F: FnOnce() + 'static,
F: FnOnce(),
{
Self::builder().run(f)
}
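Editorial sketch for the relaxed `System::run` closure bound shown above (the `'static` requirement is gone), mirroring the `non_static_block_on` test in the integration tests that follow; the string is illustrative.

```rust
fn main() -> std::io::Result<()> {
    let greeting = String::from("hello");

    actix_rt::System::run(|| {
        // The closure now only needs FnOnce(), so it may borrow local data.
        println!("{} from inside the actix system", greeting);
        actix_rt::System::current().stop();
    })
}
```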

View File

@@ -0,0 +1,126 @@
use std::time::{Duration, Instant};
#[test]
fn await_for_timer() {
let time = Duration::from_secs(2);
let instant = Instant::now();
actix_rt::System::new("test_wait_timer").block_on(async move {
tokio::time::sleep(time).await;
});
assert!(
instant.elapsed() >= time,
"Block on should poll awaited future to completion"
);
}
#[test]
fn join_another_arbiter() {
let time = Duration::from_secs(2);
let instant = Instant::now();
actix_rt::System::new("test_join_another_arbiter").block_on(async move {
let mut arbiter = actix_rt::Arbiter::new();
arbiter.send(Box::pin(async move {
tokio::time::sleep(time).await;
actix_rt::Arbiter::current().stop();
}));
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on another arbiter should complete only when it calls stop"
);
let instant = Instant::now();
actix_rt::System::new("test_join_another_arbiter").block_on(async move {
let mut arbiter = actix_rt::Arbiter::new();
arbiter.exec_fn(move || {
actix_rt::spawn(async move {
tokio::time::sleep(time).await;
actix_rt::Arbiter::current().stop();
});
});
arbiter.join().unwrap();
});
assert!(
instant.elapsed() >= time,
"Join on a arbiter that has used actix_rt::spawn should wait for said future"
);
let instant = Instant::now();
actix_rt::System::new("test_join_another_arbiter").block_on(async move {
let mut arbiter = actix_rt::Arbiter::new();
arbiter.send(Box::pin(async move {
tokio::time::sleep(time).await;
actix_rt::Arbiter::current().stop();
}));
arbiter.stop();
arbiter.join().unwrap();
});
assert!(
instant.elapsed() < time,
"Premature stop of arbiter should conclude regardless of it's current state"
);
}
// #[test]
// fn join_current_arbiter() {
// let time = Duration::from_secs(2);
//
// let instant = Instant::now();
// actix_rt::System::new("test_join_current_arbiter").block_on(async move {
// actix_rt::spawn(async move {
// tokio::time::delay_for(time).await;
// actix_rt::Arbiter::current().stop();
// });
// actix_rt::Arbiter::local_join().await;
// });
// assert!(
// instant.elapsed() >= time,
// "Join on current arbiter should wait for all spawned futures"
// );
//
// let large_timer = Duration::from_secs(20);
// let instant = Instant::now();
// actix_rt::System::new("test_join_current_arbiter").block_on(async move {
// actix_rt::spawn(async move {
// tokio::time::delay_for(time).await;
// actix_rt::Arbiter::current().stop();
// });
// let f = actix_rt::Arbiter::local_join();
// actix_rt::spawn(async move {
// tokio::time::delay_for(large_timer).await;
// actix_rt::Arbiter::current().stop();
// });
// f.await;
// });
// assert!(
// instant.elapsed() < large_timer,
// "local_join should await only for the already spawned futures"
// );
// }
#[test]
fn non_static_block_on() {
let string = String::from("test_str");
let str = string.as_str();
let sys = actix_rt::System::new("borrow some");
sys.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", str);
});
let rt = actix_rt::Runtime::new().unwrap();
rt.block_on(async {
actix_rt::time::sleep(Duration::from_millis(1)).await;
assert_eq!("test_str", str);
});
actix_rt::System::run(|| {
assert_eq!("test_str", str);
actix_rt::System::current().stop();
})
.unwrap();
}

View File

@@ -1,38 +0,0 @@
[package]
name = "actix-server-config"
version = "0.1.2"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix server config utils"
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
license = "MIT/Apache-2.0"
edition = "2018"
workspace = ".."
[lib]
name = "actix_server_config"
path = "src/lib.rs"
[package.metadata.docs.rs]
features = ["ssl", "rust-tls", "uds"]
[features]
default = []
# openssl
ssl = ["tokio-openssl"]
# rustls
rust-tls = ["rustls", "tokio-rustls"]
# unix domain sockets
uds = ["tokio-uds"]
[dependencies]
futures = "0.1.25"
tokio-io = "0.1.12"
tokio-tcp = "0.1"
tokio-openssl = { version="0.3.0", optional = true }
rustls = { version = "0.15.2", optional = true }
tokio-rustls = { version = "0.9.1", optional = true }
tokio-uds = { version="0.2.5", optional = true }

View File

@@ -1,14 +0,0 @@
# Changes
## [0.1.2] - 2019-07-18
### Added
* Add unix domain sockets support
## [0.1.1] - 2019-04-16
### Added
* `IoStream` trait and impls for TcpStream, SslStream and TlsStream

View File

@@ -1,241 +0,0 @@
use std::cell::Cell;
use std::net::SocketAddr;
use std::rc::Rc;
use std::{fmt, io, net, time};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_tcp::TcpStream;
#[derive(Debug, Clone)]
pub struct ServerConfig {
addr: SocketAddr,
secure: Rc<Cell<bool>>,
}
impl ServerConfig {
pub fn new(addr: SocketAddr) -> Self {
ServerConfig {
addr,
secure: Rc::new(Cell::new(false)),
}
}
/// Returns the address of the local half of this TCP server socket
pub fn local_addr(&self) -> SocketAddr {
self.addr
}
/// Returns true if connection is secure (tls enabled)
pub fn secure(&self) -> bool {
self.secure.as_ref().get()
}
/// Set secure flag
pub fn set_secure(&self) {
self.secure.as_ref().set(true)
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq, Hash)]
pub enum Protocol {
Unknown,
Http10,
Http11,
Http2,
Proto1,
Proto2,
Proto3,
Proto4,
Proto5,
Proto6,
}
pub struct Io<T, P = ()> {
io: T,
proto: Protocol,
params: P,
}
impl<T> Io<T, ()> {
pub fn new(io: T) -> Self {
Self {
io,
proto: Protocol::Unknown,
params: (),
}
}
}
impl<T, P> Io<T, P> {
/// Reconstruct from parts.
pub fn from_parts(io: T, params: P, proto: Protocol) -> Self {
Self { io, params, proto }
}
/// Deconstruct into parts.
pub fn into_parts(self) -> (T, P, Protocol) {
(self.io, self.params, self.proto)
}
/// Returns a shared reference to the underlying stream.
pub fn get_ref(&self) -> &T {
&self.io
}
/// Returns a mutable reference to the underlying stream.
pub fn get_mut(&mut self) -> &mut T {
&mut self.io
}
/// Get selected protocol
pub fn protocol(&self) -> Protocol {
self.proto
}
/// Return new Io object with new parameter.
pub fn set<U>(self, params: U) -> Io<T, U> {
Io {
params,
io: self.io,
proto: self.proto,
}
}
/// Maps an Io<_, P> to Io<_, U> by applying a function to a contained value.
pub fn map<U, F>(self, op: F) -> Io<T, U>
where
F: FnOnce(P) -> U,
{
Io {
io: self.io,
proto: self.proto,
params: op(self.params),
}
}
}
impl<T, P> std::ops::Deref for Io<T, P> {
type Target = T;
fn deref(&self) -> &T {
&self.io
}
}
impl<T, P> std::ops::DerefMut for Io<T, P> {
fn deref_mut(&mut self) -> &mut T {
&mut self.io
}
}
impl<T: fmt::Debug, P> fmt::Debug for Io<T, P> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
write!(f, "Io {{{:?}}}", self.io)
}
}
/// Low-level io stream operations
pub trait IoStream: AsyncRead + AsyncWrite {
/// Returns the socket address of the remote peer of this TCP connection.
fn peer_addr(&self) -> Option<SocketAddr> {
None
}
/// Sets the value of the TCP_NODELAY option on this socket.
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()>;
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()>;
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()>;
}
impl IoStream for TcpStream {
#[inline]
fn peer_addr(&self) -> Option<net::SocketAddr> {
TcpStream::peer_addr(self).ok()
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
TcpStream::set_nodelay(self, nodelay)
}
#[inline]
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
TcpStream::set_linger(self, dur)
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
TcpStream::set_keepalive(self, dur)
}
}
#[cfg(any(feature = "ssl"))]
impl<T: IoStream> IoStream for tokio_openssl::SslStream<T> {
#[inline]
fn peer_addr(&self) -> Option<net::SocketAddr> {
self.get_ref().get_ref().peer_addr()
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().get_mut().set_nodelay(nodelay)
}
#[inline]
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_linger(dur)
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().get_mut().set_keepalive(dur)
}
}
#[cfg(any(feature = "rust-tls"))]
impl<T: IoStream> IoStream for tokio_rustls::TlsStream<T, rustls::ServerSession> {
#[inline]
fn peer_addr(&self) -> Option<net::SocketAddr> {
self.get_ref().0.peer_addr()
}
#[inline]
fn set_nodelay(&mut self, nodelay: bool) -> io::Result<()> {
self.get_mut().0.set_nodelay(nodelay)
}
#[inline]
fn set_linger(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_linger(dur)
}
#[inline]
fn set_keepalive(&mut self, dur: Option<time::Duration>) -> io::Result<()> {
self.get_mut().0.set_keepalive(dur)
}
}
#[cfg(all(unix, feature = "uds"))]
impl IoStream for tokio_uds::UnixStream {
#[inline]
fn peer_addr(&self) -> Option<net::SocketAddr> {
None
}
#[inline]
fn set_nodelay(&mut self, _: bool) -> io::Result<()> {
Ok(())
}
#[inline]
fn set_linger(&mut self, _: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
#[inline]
fn set_keepalive(&mut self, _: Option<time::Duration>) -> io::Result<()> {
Ok(())
}
}

View File

@@ -1,131 +1,153 @@
# Changes
## [0.6.0] - 2019-07-18
## Unreleased - 2021-xx-xx
* Hidden `ServerBuilder::start` method has been removed. Use `ServerBuilder::run`. [#246]
### Added
[#246]: https://github.com/actix/actix-net/pull/246
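Editorial sketch for the entry above: starting a server via `run()` now that the hidden `start()` is gone. This assumes `actix_service::fn_service` is available and mirrors the bundled echo example; the address, service body, and worker count are illustrative.

```rust
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    Server::build()
        .bind("noop", ("127.0.0.1", 8080), || {
            // A do-nothing service; a real one would read/write the stream.
            fn_service(|_stream: TcpStream| async { Ok::<_, ()>(()) })
        })?
        .workers(1)
        .run()
        .await
}
```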
## 2.0.0-beta.2 - 2021-01-03
* Merge `actix-testing` to `actix-server` as `test_server` mod. [#242]
[#242]: https://github.com/actix/actix-net/pull/242
## 2.0.0-beta.1 - 2020-12-28
* Added explicit info log message on accept queue pause. [#215]
* Prevent double registration of sockets when back-pressure is resolved. [#223]
* Update `mio` dependency to `0.7.3`. [#239]
* Remove `socket2` dependency. [#239]
* `ServerBuilder::backlog` now accepts `u32` instead of `i32`. [#239]
* Remove `AcceptNotify` type and pass `WakerQueue` to `Worker` to wake up `Accept`'s `Poll`. [#239]
* Convert `mio::net::TcpStream` to `actix_rt::net::TcpStream`(`UnixStream` for uds) using
`FromRawFd` and `IntoRawFd`(`FromRawSocket` and `IntoRawSocket` on windows). [#239]
* Remove `AsyncRead` and `AsyncWrite` trait bound for `socket::FromStream` trait. [#239]
[#215]: https://github.com/actix/actix-net/pull/215
[#223]: https://github.com/actix/actix-net/pull/223
[#239]: https://github.com/actix/actix-net/pull/239
## 1.0.4 - 2020-09-12
* Update actix-codec to 0.3.0.
* Workers must be greater than 0. [#167]
[#167]: https://github.com/actix/actix-net/pull/167
## 1.0.3 - 2020-05-19
* Replace deprecated `net2` crate with `socket2` [#140]
[#140]: https://github.com/actix/actix-net/pull/140
## 1.0.2 - 2020-02-26
* Avoid error by calling `reregister()` on Windows [#103]
[#103]: https://github.com/actix/actix-net/pull/103
## 1.0.1 - 2019-12-29
* Rename `.start()` method to `.run()`
## 1.0.0 - 2019-12-11
* Use actix-net releases
## 1.0.0-alpha.4 - 2019-12-08
* Use actix-service 1.0.0-alpha.4
## 1.0.0-alpha.3 - 2019-12-07
* Migrate to tokio 0.2
* Fix compilation on non-unix platforms
* Better handling server configuration
## 1.0.0-alpha.2 - 2019-12-02
* Simplify server service (remove actix-server-config)
* Allow to wait on `Server` until server stops
## 0.8.0-alpha.1 - 2019-11-22
* Migrate to `std::future`
## 0.7.0 - 2019-10-04
* Update `rustls` to 0.16
* Minimum required Rust version upped to 1.37.0
## 0.6.1 - 2019-09-25
* Add UDS listening support to `ServerBuilder`
## 0.6.0 - 2019-07-18
* Support Unix domain sockets #3
## [0.5.1] - 2019-05-18
### Changed
## 0.5.1 - 2019-05-18
* ServerBuilder::shutdown_timeout() accepts u64
## [0.5.0] - 2019-05-12
### Added
## 0.5.0 - 2019-05-12
* Add `Debug` impl for `SslError`
* Derive debug for `Server` and `ServerCommand`
### Changed
* Upgrade to actix-service 0.4
## [0.4.3] - 2019-04-16
### Added
## 0.4.3 - 2019-04-16
* Re-export `IoStream` trait
### Changed
* Deppend on `ssl` and `rust-tls` features from actix-server-config
* Depend on `ssl` and `rust-tls` features from actix-server-config
## [0.4.2] - 2019-03-30
### Fixed
## 0.4.2 - 2019-03-30
* Fix SIGINT force shutdown
## [0.4.1] - 2019-03-14
### Added
## 0.4.1 - 2019-03-14
* `SystemRuntime::on_start()` - allow running a future before server service initialization
## [0.4.0] - 2019-03-12
### Changed
## 0.4.0 - 2019-03-12
* Use `ServerConfig` for service factory
* Wrap tcp socket to `Io` type
* Upgrade actix-service
## [0.3.1] - 2019-03-04
### Added
## 0.3.1 - 2019-03-04
* Add `ServerBuilder::maxconnrate`, which sets the maximum per-worker number of concurrent connections
* Add helper ssl error `SslError`
### Changed
* Rename `StreamServiceFactory` to `ServiceFactory`
* Deprecate `StreamServiceFactory`
## [0.3.0] - 2019-03-02
### Changed
## 0.3.0 - 2019-03-02
* Use new `NewService` trait
## [0.2.1] - 2019-02-09
### Changed
## 0.2.1 - 2019-02-09
* Drop service response
## [0.2.0] - 2019-02-01
### Changed
## 0.2.0 - 2019-02-01
* Migrate to actix-service 0.2
* Updated rustls dependency
## [0.1.3] - 2018-12-21
### Fixed
## 0.1.3 - 2018-12-21
* Fix max concurrent connections handling
## [0.1.2] - 2018-12-12
### Changed
## 0.1.2 - 2018-12-12
* rename ServiceConfig::rt() to ServiceConfig::apply()
### Fixed
* Fix back-pressure for concurrent ssl handshakes
## [0.1.1] - 2018-12-11
## 0.1.1 - 2018-12-11
* Fix signal handling on windows
## [0.1.0] - 2018-12-09
## 0.1.0 - 2018-12-09
* Move server to separate crate

View File

@@ -1,20 +1,19 @@
[package]
name = "actix-server"
version = "0.6.0"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix server - General purpose tcp server"
version = "2.0.0-beta.2"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"fakeshadow <24548779@qq.com>",
]
description = "General purpose TCP server built for the Actix ecosystem"
keywords = ["network", "framework", "async", "futures"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-server/"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
license = "MIT OR Apache-2.0"
exclude = [".gitignore", ".cargo/config"]
edition = "2018"
workspace = ".."
[package.metadata.docs.rs]
features = ["ssl", "tls", "rust-tls", "uds"]
[lib]
name = "actix_server"
@@ -23,54 +22,21 @@ path = "src/lib.rs"
[features]
default = []
# tls
tls = ["native-tls"]
# openssl
ssl = ["openssl", "tokio-openssl", "actix-server-config/ssl"]
# rustls
rust-tls = ["rustls", "tokio-rustls", "webpki", "webpki-roots", "actix-server-config/rust-tls"]
# uds
uds = ["mio-uds", "tokio-uds", "actix-server-config/uds"]
[dependencies]
actix-rt = "0.2.2"
actix-service = "0.4.1"
actix-server-config = "0.1.2"
actix-codec = "0.4.0-beta.1"
actix-rt = "2.0.0-beta.1"
actix-service = "2.0.0-beta.2"
actix-utils = "3.0.0-beta.1"
futures-core = { version = "0.3.7", default-features = false }
log = "0.4"
num_cpus = "1.0"
mio = "0.6.19"
net2 = "0.2"
futures = "0.1"
mio = { version = "0.7.6", features = ["os-poll", "net"] }
num_cpus = "1.13"
slab = "0.4"
tokio-io = "0.1"
tokio-tcp = "0.1"
tokio-timer = "0.2.8"
tokio-reactor = "0.1"
tokio-signal = "0.2"
# unix domain sockets
mio-uds = { version="0.6.7", optional = true }
tokio-uds = { version="0.2.5", optional = true }
# native-tls
native-tls = { version="0.2", optional = true }
# openssl
openssl = { version="0.10", optional = true }
tokio-openssl = { version="0.3", optional = true }
# rustls
rustls = { version = "0.15.2", optional = true }
tokio-rustls = { version = "0.9.1", optional = true }
webpki = { version = "0.19", optional = true }
webpki-roots = { version = "0.16", optional = true }
tokio = { version = "1", features = ["sync"] }
[dev-dependencies]
bytes = "0.4"
actix-codec = "0.1.2"
env_logger = "0.6"
bytes = "1"
env_logger = "0.8"
futures-util = { version = "0.3.7", default-features = false, features = ["sink"] }
tokio = { version = "1", features = ["io-util"] }

View File

@@ -0,0 +1,88 @@
//! Simple composite-service TCP echo server.
//!
//! Using the following command:
//!
//! ```sh
//! nc 127.0.0.1 8080
//! ```
//!
//! Start typing. When you press enter, the typed line will be echoed back. The server will log
//! the length of each line it echoes and the total size of data sent when the connection is closed.
use std::sync::{
atomic::{AtomicUsize, Ordering},
Arc,
};
use std::{env, io};
use actix_rt::net::TcpStream;
use actix_server::Server;
use actix_service::pipeline_factory;
use bytes::BytesMut;
use futures_util::future::ok;
use log::{error, info};
use tokio::io::{AsyncReadExt, AsyncWriteExt};
#[actix_rt::main]
async fn main() -> io::Result<()> {
env::set_var("RUST_LOG", "actix=trace,basic=trace");
env_logger::init();
let count = Arc::new(AtomicUsize::new(0));
let addr = ("127.0.0.1", 8080);
info!("starting server on port: {}", &addr.0);
// Bind socket address and start worker(s). By default, the server uses the number of available
// logical CPU cores as the worker count. For this reason, the closure passed to bind needs
// to return a service *factory*, so that it can be created once per worker.
Server::build()
.bind("echo", addr, move || {
let count = Arc::clone(&count);
let num2 = Arc::clone(&count);
pipeline_factory(move |mut stream: TcpStream| {
let count = Arc::clone(&count);
async move {
let num = count.fetch_add(1, Ordering::SeqCst);
let num = num + 1;
let mut size = 0;
let mut buf = BytesMut::new();
loop {
match stream.read_buf(&mut buf).await {
// end of stream; bail from loop
Ok(0) => break,
// more bytes to process
Ok(bytes_read) => {
info!("[{}] read {} bytes", num, bytes_read);
stream.write_all(&buf[size..]).await.unwrap();
size += bytes_read;
}
// stream error; bail from loop with error
Err(err) => {
error!("Stream Error: {:?}", err);
return Err(());
}
}
}
// send data down service pipeline
Ok((buf.freeze(), size))
}
})
.map_err(|err| error!("Service Error: {:?}", err))
.and_then(move |(_, size)| {
let num = num2.load(Ordering::SeqCst);
info!("[{}] total bytes read: {}", num, size);
ok(size)
})
})?
.workers(1)
.run()
.await
}

View File

@@ -1,121 +1,86 @@
use std::sync::mpsc as sync_mpsc;
use std::time::{Duration, Instant};
use std::time::Duration;
use std::{io, thread};
use actix_rt::time::{sleep_until, Instant};
use actix_rt::System;
use futures::future::{lazy, Future};
use log::{error, info};
use mio::{Interest, Poll, Token as MioToken};
use slab::Slab;
use tokio_timer::Delay;
use crate::server::Server;
use crate::socket::{SocketAddr, SocketListener, StdListener};
use crate::worker::{Conn, WorkerClient};
use crate::socket::{MioListener, SocketAddr};
use crate::waker_queue::{WakerInterest, WakerQueue, WAKER_TOKEN};
use crate::worker::{Conn, WorkerHandle};
use crate::Token;
pub(crate) enum Command {
Pause,
Resume,
Stop,
Worker(WorkerClient),
}
struct ServerSocketInfo {
// addr for socket. mainly used for logging.
addr: SocketAddr,
// beware: this is the crate-level token used to identify the socket and should not be confused with
// mio::Token
token: Token,
sock: SocketListener,
lst: MioListener,
// timeout is used to mark the deadline when this socket's listener should be registered again
// after an error.
timeout: Option<Instant>,
}
#[derive(Clone)]
pub(crate) struct AcceptNotify(mio::SetReadiness);
impl AcceptNotify {
pub(crate) fn new(ready: mio::SetReadiness) -> Self {
AcceptNotify(ready)
}
pub(crate) fn notify(&self) {
let _ = self.0.set_readiness(mio::Ready::readable());
}
}
impl Default for AcceptNotify {
fn default() -> Self {
AcceptNotify::new(mio::Registration::new2().1)
}
}
/// Accept loop would live with `ServerBuilder`.
///
/// It's tasked with constructing the `Poll` instance and `WakerQueue`, which are distributed to
/// `Accept` and `Worker`.
///
/// It also listens for `ServerCommand` and pushes interests to the `WakerQueue`.
pub(crate) struct AcceptLoop {
cmd_reg: Option<mio::Registration>,
cmd_ready: mio::SetReadiness,
notify_reg: Option<mio::Registration>,
notify_ready: mio::SetReadiness,
tx: sync_mpsc::Sender<Command>,
rx: Option<sync_mpsc::Receiver<Command>>,
srv: Option<Server>,
poll: Option<Poll>,
waker: WakerQueue,
}
impl AcceptLoop {
pub fn new(srv: Server) -> AcceptLoop {
let (tx, rx) = sync_mpsc::channel();
let (cmd_reg, cmd_ready) = mio::Registration::new2();
let (notify_reg, notify_ready) = mio::Registration::new2();
pub fn new(srv: Server) -> Self {
let poll = Poll::new().unwrap_or_else(|e| panic!("Can not create `mio::Poll`: {}", e));
let waker = WakerQueue::new(poll.registry())
.unwrap_or_else(|e| panic!("Can not create `mio::Waker`: {}", e));
AcceptLoop {
tx,
cmd_ready,
cmd_reg: Some(cmd_reg),
notify_ready,
notify_reg: Some(notify_reg),
rx: Some(rx),
Self {
srv: Some(srv),
poll: Some(poll),
waker,
}
}
pub fn send(&self, msg: Command) {
let _ = self.tx.send(msg);
let _ = self.cmd_ready.set_readiness(mio::Ready::readable());
pub(crate) fn waker_owned(&self) -> WakerQueue {
self.waker.clone()
}
pub fn get_notify(&self) -> AcceptNotify {
AcceptNotify::new(self.notify_ready.clone())
pub fn wake(&self, i: WakerInterest) {
self.waker.wake(i);
}
pub(crate) fn start(
&mut self,
socks: Vec<(Token, StdListener)>,
workers: Vec<WorkerClient>,
socks: Vec<(Token, MioListener)>,
handles: Vec<WorkerHandle>,
) {
let srv = self.srv.take().expect("Can not re-use AcceptInfo");
let poll = self.poll.take().unwrap();
let waker = self.waker.clone();
Accept::start(
self.rx.take().expect("Can not re-use AcceptInfo"),
self.cmd_reg.take().expect("Can not re-use AcceptInfo"),
self.notify_reg.take().expect("Can not re-use AcceptInfo"),
socks,
srv,
workers,
);
Accept::start(poll, waker, socks, srv, handles);
}
}
/// poll instance of the server.
struct Accept {
poll: mio::Poll,
rx: sync_mpsc::Receiver<Command>,
sockets: Slab<ServerSocketInfo>,
workers: Vec<WorkerClient>,
poll: Poll,
waker: WakerQueue,
handles: Vec<WorkerHandle>,
srv: Server,
timer: (mio::Registration, mio::SetReadiness),
next: usize,
backpressure: bool,
}
const DELTA: usize = 100;
const CMD: mio::Token = mio::Token(0);
const TIMER: mio::Token = mio::Token(1);
const NOTIFY: mio::Token = mio::Token(2);
/// This function defines errors that are per-connection: if we get this kind of error from
/// the `accept()` system call, the next connection might still be ready to be accepted.
@@ -130,298 +95,290 @@ fn connection_error(e: &io::Error) -> bool {
}
impl Accept {
#![allow(clippy::too_many_arguments)]
pub(crate) fn start(
rx: sync_mpsc::Receiver<Command>,
cmd_reg: mio::Registration,
notify_reg: mio::Registration,
socks: Vec<(Token, StdListener)>,
poll: Poll,
waker: WakerQueue,
socks: Vec<(Token, MioListener)>,
srv: Server,
workers: Vec<WorkerClient>,
handles: Vec<WorkerHandle>,
) {
// Accept runs in its own thread and may want to spawn additional futures onto the current
// actix system.
let sys = System::current();
// start accept thread
let _ = thread::Builder::new()
thread::Builder::new()
.name("actix-server accept loop".to_owned())
.spawn(move || {
System::set_current(sys);
let mut accept = Accept::new(rx, socks, workers, srv);
// Start listening for incoming commands
if let Err(err) = accept.poll.register(
&cmd_reg,
CMD,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
// Start listening for notify updates
if let Err(err) = accept.poll.register(
&notify_reg,
NOTIFY,
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register Registration: {}", err);
}
accept.poll();
});
let (mut accept, sockets) =
Accept::new_with_sockets(poll, waker, socks, handles, srv);
accept.poll_with(sockets);
})
.unwrap();
}
fn new(
rx: sync_mpsc::Receiver<Command>,
socks: Vec<(Token, StdListener)>,
workers: Vec<WorkerClient>,
fn new_with_sockets(
poll: Poll,
waker: WakerQueue,
socks: Vec<(Token, MioListener)>,
handles: Vec<WorkerHandle>,
srv: Server,
) -> Accept {
// Create a poll instance
let poll = match mio::Poll::new() {
Ok(poll) => poll,
Err(err) => panic!("Can not create mio::Poll: {}", err),
};
// Start accept
) -> (Accept, Slab<ServerSocketInfo>) {
let mut sockets = Slab::new();
for (hnd_token, lst) in socks.into_iter() {
for (hnd_token, mut lst) in socks.into_iter() {
let addr = lst.local_addr();
let server = lst.into_listener();
let entry = sockets.vacant_entry();
let token = entry.key();
// Start listening for incoming connections
if let Err(err) = poll.register(
&server,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
panic!("Can not register io: {}", err);
}
poll.registry()
.register(&mut lst, MioToken(token), Interest::READABLE)
.unwrap_or_else(|e| panic!("Can not register io: {}", e));
entry.insert(ServerSocketInfo {
addr,
token: hnd_token,
sock: server,
lst,
timeout: None,
});
}
// Timer
let (tm, tmr) = mio::Registration::new2();
if let Err(err) =
poll.register(&tm, TIMER, mio::Ready::readable(), mio::PollOpt::edge())
{
panic!("Can not register Registration: {}", err);
}
Accept {
let accept = Accept {
poll,
rx,
sockets,
workers,
waker,
handles,
srv,
next: 0,
timer: (tm, tmr),
backpressure: false,
}
};
(accept, sockets)
}
fn poll(&mut self) {
// Create storage for events
fn poll_with(&mut self, mut sockets: Slab<ServerSocketInfo>) {
let mut events = mio::Events::with_capacity(128);
loop {
if let Err(err) = self.poll.poll(&mut events, None) {
panic!("Poll error: {}", err);
}
self.poll
.poll(&mut events, None)
.unwrap_or_else(|e| panic!("Poll error: {}", e));
for event in events.iter() {
let token = event.token();
match token {
CMD => {
if !self.process_cmd() {
return;
// This is a loop because the command interest in the previous version was handled by a
// loop that drained the command channel. It is not yet known whether actively draining
// the waker queue is necessary or good practice.
WAKER_TOKEN => 'waker: loop {
// take the guard on every iteration so that no new interest can be added
// until the current task is done.
let mut guard = self.waker.guard();
match guard.pop_front() {
// a worker notified that it has become available; we may want to recover
// from backpressure.
Some(WakerInterest::WorkerAvailable) => {
drop(guard);
self.maybe_backpressure(&mut sockets, false);
}
// a new worker thread has been made and its handle should be added
// to Accept
Some(WakerInterest::Worker(handle)) => {
drop(guard);
// we may want to recover from backpressure.
self.maybe_backpressure(&mut sockets, false);
self.handles.push(handle);
}
// got timer interest; it's time to try registering the socket(s)
// again.
Some(WakerInterest::Timer) => {
drop(guard);
self.process_timer(&mut sockets)
}
Some(WakerInterest::Pause) => {
drop(guard);
sockets.iter_mut().for_each(|(_, info)| {
match self.deregister(info) {
Ok(_) => info!(
"Paused accepting connections on {}",
info.addr
),
Err(e) => {
error!("Can not deregister server socket {}", e)
}
}
});
}
Some(WakerInterest::Resume) => {
drop(guard);
sockets.iter_mut().for_each(|(token, info)| {
self.register_logged(token, info);
});
}
Some(WakerInterest::Stop) => {
return self.deregister_all(&mut sockets);
}
// waker queue is drained.
None => {
// Reset the WakerQueue before breaking so it does not grow
// indefinitely.
WakerQueue::reset(&mut guard);
break 'waker;
}
}
}
TIMER => self.process_timer(),
NOTIFY => self.backpressure(false),
},
_ => {
let token = usize::from(token);
if token < DELTA {
continue;
}
self.accept(token - DELTA);
self.accept(&mut sockets, token);
}
}
}
}
}
fn process_timer(&mut self) {
fn process_timer(&self, sockets: &mut Slab<ServerSocketInfo>) {
let now = Instant::now();
for (token, info) in self.sockets.iter_mut() {
sockets.iter_mut().for_each(|(token, info)| {
// only ServerSocketInfo entries with an associated timeout value were deregistered.
if let Some(inst) = info.timeout.take() {
if now > inst {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not register server socket {}", err);
} else {
info!("Resume accepting connections on {}", info.addr);
}
self.register_logged(token, info);
} else {
info.timeout = Some(inst);
}
}
});
}
#[cfg(not(target_os = "windows"))]
fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
self.poll
.registry()
.register(&mut info.lst, MioToken(token), Interest::READABLE)
}
#[cfg(target_os = "windows")]
fn register(&self, token: usize, info: &mut ServerSocketInfo) -> io::Result<()> {
// On Windows, calling register without deregister causes an error.
// See https://github.com/actix/actix-web/issues/905
// Calling reregister seems to fix the issue.
self.poll
.registry()
.register(&mut info.lst, mio::Token(token), Interest::READABLE)
.or_else(|_| {
self.poll.registry().reregister(
&mut info.lst,
mio::Token(token),
Interest::READABLE,
)
})
}
fn register_logged(&self, token: usize, info: &mut ServerSocketInfo) {
match self.register(token, info) {
Ok(_) => info!("Resume accepting connections on {}", info.addr),
Err(e) => error!("Can not register server socket {}", e),
}
}
fn process_cmd(&mut self) -> bool {
loop {
match self.rx.try_recv() {
Ok(cmd) => match cmd {
Command::Pause => {
for (_, info) in self.sockets.iter_mut() {
if let Err(err) = self.poll.deregister(&info.sock) {
error!("Can not deregister server socket {}", err);
} else {
info!("Paused accepting connections on {}", info.addr);
}
}
}
Command::Resume => {
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!(
"Accepting connections on {} has been resumed",
info.addr
);
}
}
}
Command::Stop => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
Command::Worker(worker) => {
self.backpressure(false);
self.workers.push(worker);
}
},
Err(err) => match err {
sync_mpsc::TryRecvError::Empty => break,
sync_mpsc::TryRecvError::Disconnected => {
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
return false;
}
},
}
}
true
fn deregister(&self, info: &mut ServerSocketInfo) -> io::Result<()> {
self.poll.registry().deregister(&mut info.lst)
}
fn backpressure(&mut self, on: bool) {
fn deregister_all(&self, sockets: &mut Slab<ServerSocketInfo>) {
sockets.iter_mut().for_each(|(_, info)| {
info!("Accepting connections on {} has been paused", info.addr);
let _ = self.deregister(info);
});
}
fn maybe_backpressure(&mut self, sockets: &mut Slab<ServerSocketInfo>, on: bool) {
if self.backpressure {
if !on {
self.backpressure = false;
for (token, info) in self.sockets.iter() {
if let Err(err) = self.poll.register(
&info.sock,
mio::Token(token + DELTA),
mio::Ready::readable(),
mio::PollOpt::edge(),
) {
error!("Can not resume socket accept process: {}", err);
} else {
info!("Accepting connections on {} has been resumed", info.addr);
for (token, info) in sockets.iter_mut() {
if info.timeout.is_some() {
// socket will attempt to re-register itself when its timeout completes
continue;
}
self.register_logged(token, info);
}
}
} else if on {
self.backpressure = true;
for (_, info) in self.sockets.iter() {
let _ = self.poll.deregister(&info.sock);
}
self.deregister_all(sockets);
}
}
fn accept_one(&mut self, mut msg: Conn) {
fn accept_one(&mut self, sockets: &mut Slab<ServerSocketInfo>, mut msg: Conn) {
if self.backpressure {
while !self.workers.is_empty() {
match self.workers[self.next].send(msg) {
Ok(_) => (),
while !self.handles.is_empty() {
match self.handles[self.next].send(msg) {
Ok(_) => {
self.set_next();
break;
}
Err(tmp) => {
self.srv.worker_died(self.workers[self.next].idx);
// the worker lost contact and could be gone; a message is sent to the
// `ServerBuilder` future to notify it that a new worker should be made.
// After that, remove the faulty worker.
self.srv.worker_faulted(self.handles[self.next].idx);
msg = tmp;
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
self.handles.swap_remove(self.next);
if self.handles.is_empty() {
error!("No workers");
return;
} else if self.workers.len() <= self.next {
} else if self.handles.len() <= self.next {
self.next = 0;
}
continue;
}
}
self.next = (self.next + 1) % self.workers.len();
break;
}
} else {
let mut idx = 0;
while idx < self.workers.len() {
while idx < self.handles.len() {
idx += 1;
if self.workers[self.next].available() {
match self.workers[self.next].send(msg) {
if self.handles[self.next].available() {
match self.handles[self.next].send(msg) {
Ok(_) => {
self.next = (self.next + 1) % self.workers.len();
self.set_next();
return;
}
// the worker lost contact and could be gone; a message is sent to the
// `ServerBuilder` future to notify it that a new worker should be made.
// After that, remove the faulty worker and enter backpressure if necessary.
Err(tmp) => {
self.srv.worker_died(self.workers[self.next].idx);
self.srv.worker_faulted(self.handles[self.next].idx);
msg = tmp;
self.workers.swap_remove(self.next);
if self.workers.is_empty() {
self.handles.swap_remove(self.next);
if self.handles.is_empty() {
error!("No workers");
self.backpressure(true);
self.maybe_backpressure(sockets, true);
return;
} else if self.workers.len() <= self.next {
} else if self.handles.len() <= self.next {
self.next = 0;
}
continue;
}
}
}
self.next = (self.next + 1) % self.workers.len();
self.set_next();
}
// enable backpressure
self.backpressure(true);
self.accept_one(msg);
self.maybe_backpressure(sockets, true);
self.accept_one(sockets, msg);
}
}
fn accept(&mut self, token: usize) {
// set the next worker handle that will accept work.
fn set_next(&mut self) {
self.next = (self.next + 1) % self.handles.len();
}
fn accept(&mut self, sockets: &mut Slab<ServerSocketInfo>, token: usize) {
loop {
let msg = if let Some(info) = self.sockets.get_mut(token) {
match info.sock.accept() {
let msg = if let Some(info) = sockets.get_mut(token) {
match info.lst.accept() {
Ok(Some((io, addr))) => Conn {
io,
token: info.token,
@@ -431,22 +388,22 @@ impl Accept {
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => return,
Err(ref e) if connection_error(e) => continue,
Err(e) => {
// temporarily deregister the listener
error!("Error accepting connection: {}", e);
if let Err(err) = self.poll.deregister(&info.sock) {
if let Err(err) = self.deregister(info) {
error!("Can not deregister server socket {}", err);
}
// sleep after error
// sleep after an error. Write the timeout into the socket info, since the poll loop
// will later need it to mark which socket's listener should be registered, and when.
info.timeout = Some(Instant::now() + Duration::from_millis(500));
let r = self.timer.1.clone();
System::current().arbiter().send(lazy(move || {
Delay::new(Instant::now() + Duration::from_millis(510))
.map_err(|_| ())
.and_then(move |_| {
let _ = r.set_readiness(mio::Ready::readable());
Ok(())
})
// after the sleep, a Timer interest is sent to the Accept poll loop
let waker = self.waker.clone();
System::current().arbiter().send(Box::pin(async move {
sleep_until(Instant::now() + Duration::from_millis(510)).await;
waker.wake(WakerInterest::Timer);
}));
return;
}
@@ -455,7 +412,7 @@ impl Accept {
return;
};
self.accept_one(msg);
self.accept_one(sockets, msg);
}
}
}
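
The hunk above replaces the mio 0.6 `Registration`/`SetReadiness` command plumbing with a mio 0.7 `Waker` drained through a `WakerQueue`. For reference, a minimal standalone sketch of that wake-up pattern; the names below are illustrative only and not actix-server's internal types:

use std::{sync::Arc, thread};

use mio::{Events, Poll, Token, Waker};

const WAKER: Token = Token(0);

fn main() -> std::io::Result<()> {
    let mut poll = Poll::new()?;
    // the waker is registered with the poll's registry and can be shared
    // with other threads via Arc
    let waker = Arc::new(Waker::new(poll.registry(), WAKER)?);

    let handle = {
        let waker = Arc::clone(&waker);
        thread::spawn(move || {
            // another thread wakes the (otherwise blocking) poll call
            waker.wake().expect("failed to wake poll");
        })
    };

    let mut events = Events::with_capacity(8);
    poll.poll(&mut events, None)?;
    for event in events.iter() {
        if event.token() == WAKER {
            // in actix-server, this is where the WakerQueue would be drained
            println!("woken by waker");
        }
    }
    handle.join().unwrap();
    Ok(())
}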


@@ -1,40 +1,42 @@
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use std::time::Duration;
use std::{io, mem, net};
use std::{io, mem};
use actix_rt::{spawn, Arbiter, System};
use futures::future::{lazy, ok};
use futures::stream::futures_unordered;
use futures::sync::mpsc::{unbounded, UnboundedReceiver};
use futures::{Async, Future, Poll, Stream};
use actix_rt::net::TcpStream;
use actix_rt::time::{sleep_until, Instant};
use actix_rt::{spawn, System};
use log::{error, info};
use net2::TcpBuilder;
use num_cpus;
use tokio_tcp::TcpStream;
use tokio_timer::sleep;
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver};
use tokio::sync::oneshot;
use crate::accept::{AcceptLoop, AcceptNotify, Command};
use crate::accept::AcceptLoop;
use crate::config::{ConfiguredService, ServiceConfig};
use crate::server::{Server, ServerCommand};
use crate::services::{InternalServiceFactory, ServiceFactory, StreamNewService};
use crate::service::{InternalServiceFactory, ServiceFactory, StreamNewService};
use crate::signals::{Signal, Signals};
use crate::socket::StdListener;
use crate::worker::{self, Worker, WorkerAvailability, WorkerClient};
use crate::{ssl, Token};
use crate::socket::{MioListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
use crate::socket::{MioTcpListener, MioTcpSocket};
use crate::waker_queue::{WakerInterest, WakerQueue};
use crate::worker::{self, Worker, WorkerAvailability, WorkerHandle};
use crate::{join_all, Token};
/// Server builder
pub struct ServerBuilder {
threads: usize,
token: Token,
backlog: i32,
workers: Vec<(usize, WorkerClient)>,
backlog: u32,
handles: Vec<(usize, WorkerHandle)>,
services: Vec<Box<dyn InternalServiceFactory>>,
sockets: Vec<(Token, StdListener)>,
sockets: Vec<(Token, String, MioListener)>,
accept: AcceptLoop,
exit: bool,
shutdown_timeout: Duration,
no_signals: bool,
cmd: UnboundedReceiver<ServerCommand>,
server: Server,
notify: Vec<oneshot::Sender<()>>,
}
impl Default for ServerBuilder {
@@ -46,13 +48,13 @@ impl Default for ServerBuilder {
impl ServerBuilder {
/// Create new Server builder instance
pub fn new() -> ServerBuilder {
let (tx, rx) = unbounded();
let (tx, rx) = unbounded_channel();
let server = Server::new(tx);
ServerBuilder {
threads: num_cpus::get(),
token: Token(0),
workers: Vec::new(),
token: Token::default(),
handles: Vec::new(),
services: Vec::new(),
sockets: Vec::new(),
accept: AcceptLoop::new(server.clone()),
@@ -61,6 +63,7 @@ impl ServerBuilder {
shutdown_timeout: Duration::from_secs(30),
no_signals: false,
cmd: rx,
notify: Vec::new(),
server,
}
}
@@ -68,8 +71,9 @@ impl ServerBuilder {
/// Set number of workers to start.
///
/// By default, the server uses the number of available logical CPUs as the worker
/// count.
/// count. Workers must be greater than 0.
pub fn workers(mut self, num: usize) -> Self {
assert_ne!(num, 0, "workers must be greater than 0");
self.threads = num;
self
}
@@ -84,7 +88,7 @@ impl ServerBuilder {
/// Generally set in the 64-2048 range. Default value is 2048.
///
/// This method should be called before calling `bind()`.
pub fn backlog(mut self, num: i32) -> Self {
pub fn backlog(mut self, num: u32) -> Self {
self.backlog = num;
self
}
@@ -100,17 +104,6 @@ impl ServerBuilder {
self
}
/// Sets the maximum number of concurrent connection-establish processes per worker.
///
/// All listeners will stop accepting connections when this limit is reached. It
/// can be used to limit the global SSL CPU usage.
///
/// By default, the maximum is set to 256.
pub fn maxconnrate(self, num: usize) -> Self {
ssl::max_concurrent_ssl_connect(num);
self
}
/// Stop actix system.
pub fn system_exit(mut self) -> Self {
self.exit = true;
@@ -152,8 +145,8 @@ impl ServerBuilder {
let mut srv = ConfiguredService::new(apply);
for (name, lst) in cfg.services {
let token = self.token.next();
srv.stream(token, name, lst.local_addr()?);
self.sockets.push((token, StdListener::Tcp(lst)));
srv.stream(token, name.clone(), lst.local_addr()?);
self.sockets.push((token, name, MioListener::Tcp(lst)));
}
self.services.push(Box::new(srv));
}
@@ -166,7 +159,7 @@ impl ServerBuilder {
pub fn bind<F, U, N: AsRef<str>>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
where
F: ServiceFactory<TcpStream>,
U: net::ToSocketAddrs,
U: ToSocketAddrs,
{
let sockets = bind_addr(addr, self.backlog)?;
@@ -178,36 +171,58 @@ impl ServerBuilder {
factory.clone(),
lst.local_addr()?,
));
self.sockets.push((token, StdListener::Tcp(lst)));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::Tcp(lst)));
}
Ok(self)
}
#[cfg(all(unix, feature = "uds"))]
#[cfg(unix)]
/// Add new unix domain service to the server.
pub fn bind_uds<F, U, N>(mut self, name: N, addr: U, factory: F) -> io::Result<Self>
pub fn bind_uds<F, U, N>(self, name: N, addr: U, factory: F) -> io::Result<Self>
where
F: ServiceFactory<tokio_uds::UnixStream>,
F: ServiceFactory<actix_rt::net::UnixStream>,
N: AsRef<str>,
U: AsRef<std::path::Path>,
{
use std::net::{IpAddr, Ipv4Addr, SocketAddr};
use std::os::unix::net::UnixListener;
// The path must not exist when we try to bind.
// Try to remove it to avoid bind error.
if let Err(e) = std::fs::remove_file(addr.as_ref()) {
// NotFound is expected and not an issue. Anything else is.
if e.kind() != std::io::ErrorKind::NotFound {
return Err(e);
}
}
// TODO: need to do something with existing paths
let _ = std::fs::remove_file(addr.as_ref());
let lst = UnixListener::bind(addr)?;
let lst = crate::socket::StdUnixListener::bind(addr)?;
self.listen_uds(name, lst, factory)
}
#[cfg(unix)]
/// Add new unix domain service to the server.
/// Useful when running as a systemd service and
/// a socket FD can be acquired using the systemd crate.
pub fn listen_uds<F, N: AsRef<str>>(
mut self,
name: N,
lst: crate::socket::StdUnixListener,
factory: F,
) -> io::Result<Self>
where
F: ServiceFactory<actix_rt::net::UnixStream>,
{
use std::net::{IpAddr, Ipv4Addr};
lst.set_nonblocking(true)?;
let token = self.token.next();
let addr = SocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
let addr = StdSocketAddr::new(IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)), 8080);
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory.clone(),
factory,
addr,
));
self.sockets.push((token, StdListener::Uds(lst)));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self)
}
@@ -215,68 +230,56 @@ impl ServerBuilder {
pub fn listen<F, N: AsRef<str>>(
mut self,
name: N,
lst: net::TcpListener,
lst: StdTcpListener,
factory: F,
) -> io::Result<Self>
where
F: ServiceFactory<TcpStream>,
{
lst.set_nonblocking(true)?;
let addr = lst.local_addr()?;
let token = self.token.next();
self.services.push(StreamNewService::create(
name.as_ref().to_string(),
token,
factory,
lst.local_addr()?,
addr,
));
self.sockets.push((token, StdListener::Tcp(lst)));
self.sockets
.push((token, name.as_ref().to_string(), MioListener::from(lst)));
Ok(self)
}
/// Spawn new thread and start listening for incoming connections.
///
/// This method spawns a new thread and starts a new actix system. Other than
/// that, it is similar to the `start()` method. This method blocks.
///
/// This method panics if no socket addresses get bound.
///
/// ```rust,ignore
/// use actix_web::*;
///
/// fn main() -> std::io::Result<()> {
/// Server::new()
///     .service(
///         HttpServer::new(|| App::new().service(web::service("/").to(|| HttpResponse::Ok())))
///             .bind("127.0.0.1:0")?
///     )
///     .run()
/// }
/// ```
pub fn run(self) -> io::Result<()> {
let sys = System::new("http-server");
self.start();
sys.run()
}
/// Starts processing incoming connections and returns the server controller.
pub fn start(mut self) -> Server {
pub fn run(mut self) -> Server {
if self.sockets.is_empty() {
panic!("Server should have at least one bound socket");
} else {
info!("Starting {} workers", self.threads);
// start workers
let mut workers = Vec::new();
for idx in 0..self.threads {
let worker = self.start_worker(idx, self.accept.get_notify());
workers.push(worker.clone());
self.workers.push((idx, worker));
}
let handles = (0..self.threads)
.map(|idx| {
let handle = self.start_worker(idx, self.accept.waker_owned());
self.handles.push((idx, handle.clone()));
handle
})
.collect();
// start accept thread
for sock in &self.sockets {
info!("Starting server on {}", sock.1);
info!("Starting \"{}\" service on {}", sock.1, sock.2);
}
self.accept
.start(mem::replace(&mut self.sockets, Vec::new()), workers);
self.accept.start(
mem::take(&mut self.sockets)
.into_iter()
.map(|t| (t.0, t.2))
.collect(),
handles,
);
// handle signals
if !self.no_signals {
@@ -290,31 +293,21 @@ impl ServerBuilder {
}
}
fn start_worker(&self, idx: usize, notify: AcceptNotify) -> WorkerClient {
let (tx1, rx1) = unbounded();
let (tx2, rx2) = unbounded();
let timeout = self.shutdown_timeout;
let avail = WorkerAvailability::new(notify);
let worker = WorkerClient::new(idx, tx1, tx2, avail.clone());
let services: Vec<Box<dyn InternalServiceFactory>> =
self.services.iter().map(|v| v.clone_factory()).collect();
fn start_worker(&self, idx: usize, waker: WakerQueue) -> WorkerHandle {
let avail = WorkerAvailability::new(waker);
let services = self.services.iter().map(|v| v.clone_factory()).collect();
Arbiter::new().send(lazy(move || {
Worker::start(rx1, rx2, services, avail, timeout);
Ok::<_, ()>(())
}));
worker
Worker::start(idx, services, avail, self.shutdown_timeout)
}
fn handle_cmd(&mut self, item: ServerCommand) {
match item {
ServerCommand::Pause(tx) => {
self.accept.send(Command::Pause);
self.accept.wake(WakerInterest::Pause);
let _ = tx.send(());
}
ServerCommand::Resume(tx) => {
self.accept.send(Command::Resume);
self.accept.wake(WakerInterest::Resume);
let _ = tx.send(());
}
ServerCommand::Signal(sig) => {
@@ -348,6 +341,9 @@ impl ServerBuilder {
_ => (),
}
}
ServerCommand::Notify(tx) => {
self.notify.push(tx);
}
ServerCommand::Stop {
graceful,
completion,
@@ -355,48 +351,55 @@ impl ServerBuilder {
let exit = self.exit;
// stop accept thread
self.accept.send(Command::Stop);
self.accept.wake(WakerInterest::Stop);
let notify = std::mem::take(&mut self.notify);
// stop workers
if !self.workers.is_empty() && graceful {
spawn(
futures_unordered(
self.workers
.iter()
.map(move |worker| worker.1.stop(graceful)),
)
.collect()
.then(move |_| {
if let Some(tx) = completion {
let _ = tx.send(());
}
if exit {
spawn(sleep(Duration::from_millis(300)).then(|_| {
System::current().stop();
ok(())
}));
}
ok(())
}),
)
if !self.handles.is_empty() && graceful {
let iter = self
.handles
.iter()
.map(move |worker| worker.1.stop(graceful))
.collect();
let fut = join_all(iter);
spawn(async move {
let _ = fut.await;
if let Some(tx) = completion {
let _ = tx.send(());
}
for tx in notify {
let _ = tx.send(());
}
if exit {
spawn(async {
sleep_until(Instant::now() + Duration::from_millis(300)).await;
System::current().stop();
});
}
})
} else {
// we need to stop system if server was spawned
if self.exit {
spawn(sleep(Duration::from_millis(300)).then(|_| {
spawn(async {
sleep_until(Instant::now() + Duration::from_millis(300)).await;
System::current().stop();
ok(())
}));
});
}
if let Some(tx) = completion {
let _ = tx.send(());
}
for tx in notify {
let _ = tx.send(());
}
}
}
ServerCommand::WorkerDied(idx) => {
ServerCommand::WorkerFaulted(idx) => {
let mut found = false;
for i in 0..self.workers.len() {
if self.workers[i].0 == idx {
self.workers.swap_remove(i);
for i in 0..self.handles.len() {
if self.handles[i].0 == idx {
self.handles.swap_remove(i);
found = true;
break;
}
@@ -405,10 +408,10 @@ impl ServerBuilder {
if found {
error!("Worker has died {:?}, restarting", idx);
let mut new_idx = self.workers.len();
let mut new_idx = self.handles.len();
'found: loop {
for i in 0..self.workers.len() {
if self.workers[i].0 == new_idx {
for i in 0..self.handles.len() {
if self.handles[i].0 == new_idx {
new_idx += 1;
continue 'found;
}
@@ -416,9 +419,9 @@ impl ServerBuilder {
break;
}
let worker = self.start_worker(new_idx, self.accept.get_notify());
self.workers.push((new_idx, worker.clone()));
self.accept.send(Command::Worker(worker));
let handle = self.start_worker(new_idx, self.accept.waker_owned());
self.handles.push((new_idx, handle.clone()));
self.accept.wake(WakerInterest::Worker(handle));
}
}
}
@@ -426,24 +429,22 @@ impl ServerBuilder {
}
impl Future for ServerBuilder {
type Item = ();
type Error = ();
type Output = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
loop {
match self.cmd.poll() {
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
Ok(Async::NotReady) => return Ok(Async::NotReady),
Ok(Async::Ready(Some(item))) => self.handle_cmd(item),
match Pin::new(&mut self.cmd).poll_recv(cx) {
Poll::Ready(Some(it)) => self.as_mut().get_mut().handle_cmd(it),
_ => return Poll::Pending,
}
}
}
}
pub(super) fn bind_addr<S: net::ToSocketAddrs>(
pub(super) fn bind_addr<S: ToSocketAddrs>(
addr: S,
backlog: i32,
) -> io::Result<Vec<net::TcpListener>> {
backlog: u32,
) -> io::Result<Vec<MioTcpListener>> {
let mut err = None;
let mut succ = false;
let mut sockets = Vec::new();
@@ -471,12 +472,13 @@ pub(super) fn bind_addr<S: net::ToSocketAddrs>(
}
}
fn create_tcp_listener(addr: net::SocketAddr, backlog: i32) -> io::Result<net::TcpListener> {
let builder = match addr {
net::SocketAddr::V4(_) => TcpBuilder::new_v4()?,
net::SocketAddr::V6(_) => TcpBuilder::new_v6()?,
fn create_tcp_listener(addr: StdSocketAddr, backlog: u32) -> io::Result<MioTcpListener> {
let socket = match addr {
StdSocketAddr::V4(_) => MioTcpSocket::new_v4()?,
StdSocketAddr::V6(_) => MioTcpSocket::new_v6()?,
};
builder.reuse_address(true)?;
builder.bind(addr)?;
Ok(builder.listen(backlog)?)
socket.set_reuseaddr(true)?;
socket.bind(addr)?;
socket.listen(backlog)
}
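
After this change `ServerBuilder::run()` no longer blocks: it returns the `Server` controller, which is itself a future that resolves on shutdown. A minimal usage sketch under the builder API shown in this hunk; the `#[actix_rt::main]` attribute and `fn_service` helper come from the companion actix-rt/actix-service crates and are assumptions here, not part of this file:

use actix_rt::net::TcpStream;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    actix_server::Server::build()
        .bind("echo", ("127.0.0.1", 8080), || {
            // the closure acts as a ServiceFactory<TcpStream>; it builds one service per worker
            fn_service(|stream: TcpStream| async move {
                // handle the accepted connection here
                drop(stream);
                Ok::<_, ()>(())
            })
        })?
        .workers(2)
        .run()
        .await
}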


@@ -1,29 +1,30 @@
use std::collections::HashMap;
use std::{fmt, io, net};
use std::future::Future;
use std::{fmt, io};
use actix_server_config::{Io, ServerConfig};
use actix_service::{IntoNewService, NewService};
use futures::future::{join_all, Future};
use log::error;
use tokio_tcp::TcpStream;
use crate::counter::CounterGuard;
use super::builder::bind_addr;
use super::services::{
BoxedServerService, InternalServiceFactory, ServerMessage, StreamService,
use actix_rt::net::TcpStream;
use actix_service::{
fn_service, IntoServiceFactory as IntoBaseServiceFactory,
ServiceFactory as BaseServiceFactory,
};
use super::Token;
use actix_utils::counter::CounterGuard;
use futures_core::future::LocalBoxFuture;
use log::error;
use crate::builder::bind_addr;
use crate::service::{BoxedServerService, InternalServiceFactory, StreamService};
use crate::socket::{MioStream, MioTcpListener, StdSocketAddr, StdTcpListener, ToSocketAddrs};
use crate::{ready, Token};
pub struct ServiceConfig {
pub(crate) services: Vec<(String, net::TcpListener)>,
pub(crate) services: Vec<(String, MioTcpListener)>,
pub(crate) apply: Option<Box<dyn ServiceRuntimeConfiguration>>,
pub(crate) threads: usize,
pub(crate) backlog: i32,
pub(crate) backlog: u32,
}
impl ServiceConfig {
pub(super) fn new(threads: usize, backlog: i32) -> ServiceConfig {
pub(super) fn new(threads: usize, backlog: u32) -> ServiceConfig {
ServiceConfig {
threads,
backlog,
@@ -43,24 +44,20 @@ impl ServiceConfig {
/// Add new service to server
pub fn bind<U, N: AsRef<str>>(&mut self, name: N, addr: U) -> io::Result<&mut Self>
where
U: net::ToSocketAddrs,
U: ToSocketAddrs,
{
let sockets = bind_addr(addr, self.backlog)?;
for lst in sockets {
self.listen(name.as_ref(), lst);
self._listen(name.as_ref(), lst);
}
Ok(self)
}
/// Add new service to server
pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: net::TcpListener) -> &mut Self {
if self.apply.is_none() {
self.apply = Some(Box::new(not_configured));
}
self.services.push((name.as_ref().to_string(), lst));
self
pub fn listen<N: AsRef<str>>(&mut self, name: N, lst: StdTcpListener) -> &mut Self {
self._listen(name, MioTcpListener::from_std(lst))
}
/// Register service configuration function. This function get called
@@ -72,12 +69,21 @@ impl ServiceConfig {
self.apply = Some(Box::new(f));
Ok(())
}
fn _listen<N: AsRef<str>>(&mut self, name: N, lst: MioTcpListener) -> &mut Self {
if self.apply.is_none() {
self.apply = Some(Box::new(not_configured));
}
self.services.push((name.as_ref().to_string(), lst));
self
}
}
pub(super) struct ConfiguredService {
rt: Box<dyn ServiceRuntimeConfiguration>,
names: HashMap<Token, (String, net::SocketAddr)>,
services: HashMap<String, Token>,
names: HashMap<Token, (String, StdSocketAddr)>,
topics: HashMap<String, Token>,
services: Vec<Token>,
}
impl ConfiguredService {
@@ -85,13 +91,15 @@ impl ConfiguredService {
ConfiguredService {
rt,
names: HashMap::new(),
services: HashMap::new(),
topics: HashMap::new(),
services: Vec::new(),
}
}
pub(super) fn stream(&mut self, token: Token, name: String, addr: net::SocketAddr) {
pub(super) fn stream(&mut self, token: Token, name: String, addr: StdSocketAddr) {
self.names.insert(token, (name.clone(), addr));
self.services.insert(name, token);
self.topics.insert(name, token);
self.services.push(token);
}
}
@@ -104,54 +112,52 @@ impl InternalServiceFactory for ConfiguredService {
Box::new(Self {
rt: self.rt.clone(),
names: self.names.clone(),
topics: self.topics.clone(),
services: self.services.clone(),
})
}
fn create(&self) -> Box<dyn Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>> {
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
// configure services
let mut rt = ServiceRuntime::new(self.services.clone());
let mut rt = ServiceRuntime::new(self.topics.clone());
self.rt.configure(&mut rt);
rt.validate();
let mut names = self.names.clone();
let tokens = self.services.clone();
let services = rt.services;
// on start futures
if rt.onstart.is_empty() {
// construct services
let mut fut = Vec::new();
for (token, ns) in services {
let config = ServerConfig::new(self.names[&token].1);
fut.push(ns.new_service(&config).map(move |service| (token, service)));
// construct services
Box::pin(async move {
let mut services = rt.services;
// TODO: Proper error handling here
for f in rt.onstart.into_iter() {
f.await;
}
Box::new(join_all(fut).map_err(|e| {
error!("Can not construct service: {:?}", e);
}))
} else {
let names = self.names.clone();
// run onstart future and then construct services
Box::new(
join_all(rt.onstart)
.map_err(|e| {
error!("Can not construct service: {:?}", e);
})
.and_then(move |_| {
// construct services
let mut fut = Vec::new();
for (token, ns) in services {
let config = ServerConfig::new(names[&token].1);
fut.push(
ns.new_service(&config).map(move |service| (token, service)),
);
let mut res = vec![];
for token in tokens {
if let Some(srv) = services.remove(&token) {
let newserv = srv.new_service(());
match newserv.await {
Ok(serv) => {
res.push((token, serv));
}
join_all(fut).map_err(|e| {
error!("Can not construct service: {:?}", e);
})
}),
)
}
Err(_) => {
error!("Can not construct service");
return Err(());
}
}
} else {
let name = names.remove(&token).unwrap().0;
res.push((
token,
Box::new(StreamService::new(fn_service(move |_: TcpStream| {
error!("Service {:?} is not configured", name);
ready::<Result<_, ()>>(Ok(()))
}))),
));
};
}
Ok(res)
})
}
}
@@ -181,7 +187,7 @@ fn not_configured(_: &mut ServiceRuntime) {
pub struct ServiceRuntime {
names: HashMap<String, Token>,
services: HashMap<Token, BoxedNewService>,
onstart: Vec<Box<dyn Future<Item = (), Error = ()>>>,
onstart: Vec<LocalBoxFuture<'static, ()>>,
}
impl ServiceRuntime {
@@ -207,8 +213,8 @@ impl ServiceRuntime {
/// *ServiceConfig::bind()* or *ServiceConfig::listen()* methods.
pub fn service<T, F>(&mut self, name: &str, service: F)
where
F: IntoNewService<T>,
T: NewService<Config = ServerConfig, Request = Io<TcpStream>> + 'static,
F: IntoBaseServiceFactory<T, TcpStream>,
T: BaseServiceFactory<TcpStream, Config = ()> + 'static,
T::Future: 'static,
T::Service: 'static,
T::InitError: fmt::Debug,
@@ -216,9 +222,9 @@ impl ServiceRuntime {
// let name = name.to_owned();
if let Some(token) = self.names.get(name) {
self.services.insert(
token.clone(),
*token,
Box::new(ServiceFactory {
inner: service.into_new_service(),
inner: service.into_factory(),
}),
);
} else {
@@ -229,21 +235,21 @@ impl ServiceRuntime {
/// Execute a future before services are initialized.
pub fn on_start<F>(&mut self, fut: F)
where
F: Future<Item = (), Error = ()> + 'static,
F: Future<Output = ()> + 'static,
{
self.onstart.push(Box::new(fut))
self.onstart.push(Box::pin(fut))
}
}
type BoxedNewService = Box<
dyn NewService<
Request = (Option<CounterGuard>, ServerMessage),
dyn BaseServiceFactory<
(Option<CounterGuard>, MioStream),
Response = (),
Error = (),
InitError = (),
Config = ServerConfig,
Config = (),
Service = BoxedServerService,
Future = Box<dyn Future<Item = BoxedServerService, Error = ()>>,
Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>,
>,
>;
@@ -251,26 +257,31 @@ struct ServiceFactory<T> {
inner: T,
}
impl<T> NewService for ServiceFactory<T>
impl<T> BaseServiceFactory<(Option<CounterGuard>, MioStream)> for ServiceFactory<T>
where
T: NewService<Config = ServerConfig, Request = Io<TcpStream>>,
T: BaseServiceFactory<TcpStream, Config = ()>,
T::Future: 'static,
T::Service: 'static,
T::Error: 'static,
T::InitError: fmt::Debug + 'static,
{
type Request = (Option<CounterGuard>, ServerMessage);
type Response = ();
type Error = ();
type InitError = ();
type Config = ServerConfig;
type Config = ();
type Service = BoxedServerService;
type Future = Box<dyn Future<Item = BoxedServerService, Error = ()>>;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<BoxedServerService, ()>>;
fn new_service(&self, cfg: &ServerConfig) -> Self::Future {
Box::new(self.inner.new_service(cfg).map_err(|_| ()).map(|s| {
let service: BoxedServerService = Box::new(StreamService::new(s));
service
}))
fn new_service(&self, _: ()) -> Self::Future {
let fut = self.inner.new_service(());
Box::pin(async move {
match fut.await {
Ok(s) => Ok(Box::new(StreamService::new(s)) as BoxedServerService),
Err(e) => {
error!("Can not construct service: {:?}", e);
Err(())
}
}
})
}
}
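
The configured-service path now awaits `LocalBoxFuture`s instead of chaining futures 0.1 combinators. Below is a hedged sketch of how the builder might drive `ServiceConfig`/`ServiceRuntime`, assuming the method names visible in this hunk (`bind`, `apply`, `service`, `on_start`) plus a `ServerBuilder::configure` entry point and the `fn_service` helper from actix-service:

use actix_rt::net::TcpStream;
use actix_service::fn_service;

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    actix_server::Server::build()
        .configure(|cfg| {
            // bind a named socket; the service for it is attached in the runtime closure
            cfg.bind("echo", ("127.0.0.1", 8080))?;
            cfg.apply(|rt| {
                // runs before the services are constructed
                rt.on_start(async { println!("starting echo service"); });
                rt.service(
                    "echo",
                    fn_service(|stream: TcpStream| async move {
                        drop(stream);
                        Ok::<_, ()>(())
                    }),
                );
            })?;
            Ok(())
        })?
        .workers(1)
        .run()
        .await
}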


@@ -1,80 +0,0 @@
use std::cell::Cell;
use std::rc::Rc;
use futures::task::AtomicTask;
#[derive(Clone)]
/// Simple counter with the ability to notify a task on reaching a specific number.
///
/// The counter can be cloned; the total count is shared across all clones.
pub struct Counter(Rc<CounterInner>);
#[derive(Debug)]
struct CounterInner {
count: Cell<usize>,
capacity: usize,
task: AtomicTask,
}
impl Counter {
/// Create `Counter` instance and set max value.
pub fn new(capacity: usize) -> Self {
Counter(Rc::new(CounterInner {
capacity,
count: Cell::new(0),
task: AtomicTask::new(),
}))
}
pub fn get(&self) -> CounterGuard {
CounterGuard::new(self.0.clone())
}
/// Check if counter is not at capacity
pub fn available(&self) -> bool {
self.0.available()
}
/// Get total number of acquired counts
pub fn total(&self) -> usize {
self.0.count.get()
}
}
#[derive(Debug)]
pub struct CounterGuard(Rc<CounterInner>);
impl CounterGuard {
fn new(inner: Rc<CounterInner>) -> Self {
inner.inc();
CounterGuard(inner)
}
}
impl Drop for CounterGuard {
fn drop(&mut self) {
self.0.dec();
}
}
impl CounterInner {
fn inc(&self) {
self.count.set(self.count.get() + 1);
}
fn dec(&self) {
let num = self.count.get();
self.count.set(num - 1);
if num == self.capacity {
self.task.notify();
}
}
fn available(&self) -> bool {
let avail = self.count.get() < self.capacity;
if !avail {
self.task.register();
}
avail
}
}


@@ -1,36 +1,50 @@
//! General purpose tcp server
//! General purpose TCP server.
#![deny(rust_2018_idioms, nonstandard_style)]
#![doc(html_logo_url = "https://actix.rs/img/logo.png")]
#![doc(html_favicon_url = "https://actix.rs/favicon.ico")]
mod accept;
mod builder;
mod config;
mod counter;
mod server;
mod services;
mod service;
mod signals;
mod socket;
pub mod ssl;
mod test_server;
mod waker_queue;
mod worker;
pub use actix_server_config::{Io, IoStream, Protocol, ServerConfig};
pub use self::builder::ServerBuilder;
pub use self::config::{ServiceConfig, ServiceRuntime};
pub use self::server::Server;
pub use self::services::ServiceFactory;
pub use self::service::ServiceFactory;
pub use self::test_server::TestServer;
#[doc(hidden)]
pub use self::socket::FromStream;
#[doc(hidden)]
pub use self::services::ServiceFactory as StreamServiceFactory;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
/// Socket id token
/// Socket ID token
#[derive(Clone, Copy, Debug, PartialEq, Eq, Hash)]
pub(crate) struct Token(usize);
impl Default for Token {
fn default() -> Self {
Self::new()
}
}
impl Token {
fn new() -> Self {
Self(0)
}
pub(crate) fn next(&mut self) -> Token {
let token = Token(self.0 + 1);
let token = Token(self.0);
self.0 += 1;
token
}
@@ -40,3 +54,90 @@ impl Token {
pub fn new() -> ServerBuilder {
ServerBuilder::default()
}
// temporary Ready type standing in for std::future::{ready, Ready}; can be removed when MSRV surpasses 1.48
#[doc(hidden)]
pub struct Ready<T>(Option<T>);
pub(crate) fn ready<T>(t: T) -> Ready<T> {
Ready(Some(t))
}
impl<T> Unpin for Ready<T> {}
impl<T> Future for Ready<T> {
type Output = T;
fn poll(self: Pin<&mut Self>, _: &mut Context<'_>) -> Poll<Self::Output> {
Poll::Ready(self.get_mut().0.take().unwrap())
}
}
// A poor man's join future. The joined future is only used when starting/stopping the server;
// pin_project and pinned futures are overkill for this task.
pub(crate) struct JoinAll<T> {
fut: Vec<JoinFuture<T>>,
}
pub(crate) fn join_all<T>(fut: Vec<impl Future<Output = T> + 'static>) -> JoinAll<T> {
let fut = fut
.into_iter()
.map(|f| JoinFuture::Future(Box::pin(f)))
.collect();
JoinAll { fut }
}
enum JoinFuture<T> {
Future(Pin<Box<dyn Future<Output = T>>>),
Result(Option<T>),
}
impl<T> Unpin for JoinAll<T> {}
impl<T> Future for JoinAll<T> {
type Output = Vec<T>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut ready = true;
let this = self.get_mut();
for fut in this.fut.iter_mut() {
if let JoinFuture::Future(f) = fut {
match f.as_mut().poll(cx) {
Poll::Ready(t) => {
*fut = JoinFuture::Result(Some(t));
}
Poll::Pending => ready = false,
}
}
}
if ready {
let mut res = Vec::new();
for fut in this.fut.iter_mut() {
if let JoinFuture::Result(f) = fut {
res.push(f.take().unwrap());
}
}
Poll::Ready(res)
} else {
Poll::Pending
}
}
}
#[cfg(test)]
mod test {
use super::*;
#[actix_rt::test]
async fn test_join_all() {
let futs = vec![ready(Ok(1)), ready(Err(3)), ready(Ok(9))];
let mut res = join_all(futs).await.into_iter();
assert_eq!(Ok(1), res.next().unwrap());
assert_eq!(Err(3), res.next().unwrap());
assert_eq!(Ok(9), res.next().unwrap());
}
}


@@ -1,13 +1,17 @@
use futures::sync::mpsc::UnboundedSender;
use futures::sync::oneshot;
use futures::Future;
use std::future::Future;
use std::io;
use std::pin::Pin;
use std::task::{Context, Poll};
use tokio::sync::mpsc::UnboundedSender;
use tokio::sync::oneshot;
use crate::builder::ServerBuilder;
use crate::signals::Signal;
#[derive(Debug)]
pub(crate) enum ServerCommand {
WorkerDied(usize),
WorkerFaulted(usize),
Pause(oneshot::Sender<()>),
Resume(oneshot::Sender<()>),
Signal(Signal),
@@ -16,14 +20,19 @@ pub(crate) enum ServerCommand {
graceful: bool,
completion: Option<oneshot::Sender<()>>,
},
/// Notify of server stop
Notify(oneshot::Sender<()>),
}
#[derive(Debug, Clone)]
pub struct Server(UnboundedSender<ServerCommand>);
#[derive(Debug)]
pub struct Server(
UnboundedSender<ServerCommand>,
Option<oneshot::Receiver<()>>,
);
impl Server {
pub(crate) fn new(tx: UnboundedSender<ServerCommand>) -> Self {
Server(tx)
Server(tx, None)
}
/// Start server building process
@@ -32,39 +41,72 @@ impl Server {
}
pub(crate) fn signal(&self, sig: Signal) {
let _ = self.0.unbounded_send(ServerCommand::Signal(sig));
let _ = self.0.send(ServerCommand::Signal(sig));
}
pub(crate) fn worker_died(&self, idx: usize) {
let _ = self.0.unbounded_send(ServerCommand::WorkerDied(idx));
pub(crate) fn worker_faulted(&self, idx: usize) {
let _ = self.0.send(ServerCommand::WorkerFaulted(idx));
}
/// Pause accepting incoming connections
///
/// If the socket contains pending connections, they might be dropped.
/// All open connections remain active.
pub fn pause(&self) -> impl Future<Item = (), Error = ()> {
pub fn pause(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.unbounded_send(ServerCommand::Pause(tx));
rx.map_err(|_| ())
let _ = self.0.send(ServerCommand::Pause(tx));
async {
let _ = rx.await;
}
}
/// Resume accepting incoming connections
pub fn resume(&self) -> impl Future<Item = (), Error = ()> {
pub fn resume(&self) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.unbounded_send(ServerCommand::Resume(tx));
rx.map_err(|_| ())
let _ = self.0.send(ServerCommand::Resume(tx));
async {
let _ = rx.await;
}
}
/// Stop incoming connection processing, stop all workers and exit.
///
/// If the server was started with the `spawn()` method, the spawned thread gets terminated.
pub fn stop(&self, graceful: bool) -> impl Future<Item = (), Error = ()> {
pub fn stop(&self, graceful: bool) -> impl Future<Output = ()> {
let (tx, rx) = oneshot::channel();
let _ = self.0.unbounded_send(ServerCommand::Stop {
let _ = self.0.send(ServerCommand::Stop {
graceful,
completion: Some(tx),
});
rx.map_err(|_| ())
async {
let _ = rx.await;
}
}
}
impl Clone for Server {
fn clone(&self) -> Self {
Self(self.0.clone(), None)
}
}
impl Future for Server {
type Output = io::Result<()>;
fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let this = self.get_mut();
if this.1.is_none() {
let (tx, rx) = oneshot::channel();
if this.0.send(ServerCommand::Notify(tx)).is_err() {
return Poll::Ready(Ok(()));
}
this.1 = Some(rx);
}
match Pin::new(this.1.as_mut().unwrap()).poll(cx) {
Poll::Pending => Poll::Pending,
Poll::Ready(_) => Poll::Ready(Ok(())),
}
}
}
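
The `Server` handle is now fully async: `pause`, `resume`, and `stop` return plain futures, and awaiting the handle itself (via the new `Notify` command) resolves once the server has stopped. A small sketch, assuming `srv` is the handle returned by `ServerBuilder::run()`:

use actix_server::Server;

async fn manage(srv: Server) {
    // temporarily stop accepting new connections (open ones stay alive)
    srv.pause().await;
    // start accepting again
    srv.resume().await;
    // graceful shutdown; the future resolves once the workers have stopped
    srv.stop(true).await;
}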

actix-server/src/service.rs (new file, 157 lines)

@@ -0,0 +1,157 @@
use std::marker::PhantomData;
use std::net::SocketAddr;
use std::task::{Context, Poll};
use actix_service::{Service, ServiceFactory as BaseServiceFactory};
use actix_utils::counter::CounterGuard;
use futures_core::future::LocalBoxFuture;
use log::error;
use crate::socket::{FromStream, MioStream};
use crate::{ready, Ready, Token};
pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
type Factory: BaseServiceFactory<Stream, Config = ()>;
fn create(&self) -> Self::Factory;
}
pub(crate) trait InternalServiceFactory: Send {
fn name(&self, token: Token) -> &str;
fn clone_factory(&self) -> Box<dyn InternalServiceFactory>;
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>;
}
pub(crate) type BoxedServerService = Box<
dyn Service<
(Option<CounterGuard>, MioStream),
Response = (),
Error = (),
Future = Ready<Result<(), ()>>,
>,
>;
pub(crate) struct StreamService<S, I> {
service: S,
_phantom: PhantomData<I>,
}
impl<S, I> StreamService<S, I> {
pub(crate) fn new(service: S) -> Self {
StreamService {
service,
_phantom: PhantomData,
}
}
}
impl<S, I> Service<(Option<CounterGuard>, MioStream)> for StreamService<S, I>
where
S: Service<I>,
S::Future: 'static,
S::Error: 'static,
I: FromStream,
{
type Response = ();
type Error = ();
type Future = Ready<Result<(), ()>>;
fn poll_ready(&mut self, ctx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
self.service.poll_ready(ctx).map_err(|_| ())
}
fn call(&mut self, (guard, req): (Option<CounterGuard>, MioStream)) -> Self::Future {
ready(match FromStream::from_mio(req) {
Ok(stream) => {
let f = self.service.call(stream);
actix_rt::spawn(async move {
let _ = f.await;
drop(guard);
});
Ok(())
}
Err(e) => {
error!("Can not convert to an async tcp stream: {}", e);
Err(())
}
})
}
}
pub(crate) struct StreamNewService<F: ServiceFactory<Io>, Io: FromStream> {
name: String,
inner: F,
token: Token,
addr: SocketAddr,
_t: PhantomData<Io>,
}
impl<F, Io> StreamNewService<F, Io>
where
F: ServiceFactory<Io>,
Io: FromStream + Send + 'static,
{
pub(crate) fn create(
name: String,
token: Token,
inner: F,
addr: SocketAddr,
) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
name,
token,
inner,
addr,
_t: PhantomData,
})
}
}
impl<F, Io> InternalServiceFactory for StreamNewService<F, Io>
where
F: ServiceFactory<Io>,
Io: FromStream + Send + 'static,
{
fn name(&self, _: Token) -> &str {
&self.name
}
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
name: self.name.clone(),
inner: self.inner.clone(),
token: self.token,
addr: self.addr,
_t: PhantomData,
})
}
fn create(&self) -> LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>> {
let token = self.token;
let fut = self.inner.create().new_service(());
Box::pin(async move {
match fut.await {
Ok(inner) => {
let service = Box::new(StreamService::new(inner)) as _;
Ok(vec![(token, service)])
}
Err(_) => Err(()),
}
})
}
}
impl<F, T, I> ServiceFactory<I> for F
where
F: Fn() -> T + Send + Clone + 'static,
T: BaseServiceFactory<I, Config = ()>,
I: FromStream,
{
type Factory = T;
fn create(&self) -> T {
(self)()
}
}


@@ -1,186 +0,0 @@
use std::marker::PhantomData;
use std::net::SocketAddr;
use std::time::Duration;
use actix_rt::spawn;
use actix_server_config::{Io, ServerConfig};
use actix_service::{NewService, Service};
use futures::future::{err, ok, FutureResult};
use futures::{Future, Poll};
use log::error;
use super::Token;
use crate::counter::CounterGuard;
use crate::socket::{FromStream, StdStream};
/// Server message
pub(crate) enum ServerMessage {
/// New stream
Connect(StdStream),
/// Graceful shutdown
Shutdown(Duration),
/// Force shutdown
ForceShutdown,
}
pub trait ServiceFactory<Stream: FromStream>: Send + Clone + 'static {
type NewService: NewService<Config = ServerConfig, Request = Io<Stream>>;
fn create(&self) -> Self::NewService;
}
pub(crate) trait InternalServiceFactory: Send {
fn name(&self, token: Token) -> &str;
fn clone_factory(&self) -> Box<dyn InternalServiceFactory>;
fn create(&self) -> Box<dyn Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>>;
}
pub(crate) type BoxedServerService = Box<
dyn Service<
Request = (Option<CounterGuard>, ServerMessage),
Response = (),
Error = (),
Future = FutureResult<(), ()>,
>,
>;
pub(crate) struct StreamService<T> {
service: T,
}
impl<T> StreamService<T> {
pub(crate) fn new(service: T) -> Self {
StreamService { service }
}
}
impl<T, I> Service for StreamService<T>
where
T: Service<Request = Io<I>>,
T::Future: 'static,
T::Error: 'static,
I: FromStream,
{
type Request = (Option<CounterGuard>, ServerMessage);
type Response = ();
type Error = ();
type Future = FutureResult<(), ()>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
self.service.poll_ready().map_err(|_| ())
}
fn call(&mut self, (guard, req): (Option<CounterGuard>, ServerMessage)) -> Self::Future {
match req {
ServerMessage::Connect(stream) => {
let stream = FromStream::from_stdstream(stream).map_err(|e| {
error!("Can not convert to an async tcp stream: {}", e);
});
if let Ok(stream) = stream {
spawn(self.service.call(Io::new(stream)).then(move |res| {
drop(guard);
res.map_err(|_| ()).map(|_| ())
}));
ok(())
} else {
err(())
}
}
_ => ok(()),
}
}
}
pub(crate) struct StreamNewService<F: ServiceFactory<Io>, Io: FromStream> {
name: String,
inner: F,
token: Token,
addr: SocketAddr,
_t: PhantomData<Io>,
}
impl<F, Io> StreamNewService<F, Io>
where
F: ServiceFactory<Io>,
Io: FromStream + Send + 'static,
{
pub(crate) fn create(
name: String,
token: Token,
inner: F,
addr: SocketAddr,
) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
name,
token,
inner,
addr,
_t: PhantomData,
})
}
}
impl<F, Io> InternalServiceFactory for StreamNewService<F, Io>
where
F: ServiceFactory<Io>,
Io: FromStream + Send + 'static,
{
fn name(&self, _: Token) -> &str {
&self.name
}
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
Box::new(Self {
name: self.name.clone(),
inner: self.inner.clone(),
token: self.token,
addr: self.addr,
_t: PhantomData,
})
}
fn create(&self) -> Box<dyn Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>> {
let token = self.token;
let config = ServerConfig::new(self.addr);
Box::new(
self.inner
.create()
.new_service(&config)
.map_err(|_| ())
.map(move |inner| {
let service: BoxedServerService = Box::new(StreamService::new(inner));
vec![(token, service)]
}),
)
}
}
impl InternalServiceFactory for Box<dyn InternalServiceFactory> {
fn name(&self, token: Token) -> &str {
self.as_ref().name(token)
}
fn clone_factory(&self) -> Box<dyn InternalServiceFactory> {
self.as_ref().clone_factory()
}
fn create(&self) -> Box<dyn Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>> {
self.as_ref().create()
}
}
impl<F, T, I> ServiceFactory<I> for F
where
F: Fn() -> T + Send + Clone + 'static,
T: NewService<Config = ServerConfig, Request = Io<I>>,
I: FromStream,
{
type NewService = T;
fn create(&self) -> T {
(self)()
}
}


@@ -1,12 +1,13 @@
use std::io;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use actix_rt::spawn;
use futures::stream::futures_unordered;
use futures::{Async, Future, Poll, Stream};
use futures_core::future::LocalBoxFuture;
use crate::server::Server;
/// Different types of process signals
#[allow(dead_code)]
#[derive(PartialEq, Clone, Copy, Debug)]
pub(crate) enum Signal {
/// SIGHUP
@@ -22,97 +23,76 @@ pub(crate) enum Signal {
pub(crate) struct Signals {
srv: Server,
#[cfg(not(unix))]
stream: SigStream,
signals: LocalBoxFuture<'static, std::io::Result<()>>,
#[cfg(unix)]
streams: Vec<SigStream>,
signals: Vec<(Signal, LocalBoxFuture<'static, ()>)>,
}
type SigStream = Box<dyn Stream<Item = Signal, Error = io::Error>>;
impl Signals {
pub(crate) fn start(srv: Server) {
let fut = {
#[cfg(not(unix))]
{
tokio_signal::ctrl_c()
.map_err(|_| ())
.and_then(move |stream| Signals {
srv,
stream: Box::new(stream.map(|_| Signal::Int)),
})
#[cfg(not(unix))]
{
actix_rt::spawn(Signals {
srv,
signals: Box::pin(actix_rt::signal::ctrl_c()),
});
}
#[cfg(unix)]
{
use actix_rt::signal::unix;
let sig_map = [
(unix::SignalKind::interrupt(), Signal::Int),
(unix::SignalKind::hangup(), Signal::Hup),
(unix::SignalKind::terminate(), Signal::Term),
(unix::SignalKind::quit(), Signal::Quit),
];
let mut signals = Vec::new();
for (kind, sig) in sig_map.iter() {
match unix::signal(*kind) {
Ok(mut stream) => {
let fut = Box::pin(async move {
let _ = stream.recv().await;
}) as _;
signals.push((*sig, fut));
}
Err(e) => log::error!(
"Can not initialize stream handler for {:?} err: {}",
sig,
e
),
}
}
#[cfg(unix)]
{
use tokio_signal::unix;
let mut sigs: Vec<Box<dyn Future<Item = SigStream, Error = io::Error>>> =
Vec::new();
sigs.push(Box::new(
tokio_signal::unix::Signal::new(tokio_signal::unix::SIGINT).map(|stream| {
let s: SigStream = Box::new(stream.map(|_| Signal::Int));
s
}),
));
sigs.push(Box::new(
tokio_signal::unix::Signal::new(tokio_signal::unix::SIGHUP).map(
|stream: unix::Signal| {
let s: SigStream = Box::new(stream.map(|_| Signal::Hup));
s
},
),
));
sigs.push(Box::new(
tokio_signal::unix::Signal::new(tokio_signal::unix::SIGTERM).map(
|stream| {
let s: SigStream = Box::new(stream.map(|_| Signal::Term));
s
},
),
));
sigs.push(Box::new(
tokio_signal::unix::Signal::new(tokio_signal::unix::SIGQUIT).map(
|stream| {
let s: SigStream = Box::new(stream.map(|_| Signal::Quit));
s
},
),
));
futures_unordered(sigs)
.collect()
.map_err(|_| ())
.and_then(move |streams| Signals { srv, streams })
}
};
spawn(fut);
actix_rt::spawn(Signals { srv, signals });
}
}
}
impl Future for Signals {
type Item = ();
type Error = ();
type Output = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
#[cfg(not(unix))]
loop {
match self.stream.poll() {
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
Ok(Async::Ready(Some(sig))) => self.srv.signal(sig),
Ok(Async::NotReady) => return Ok(Async::NotReady),
match self.signals.as_mut().poll(cx) {
Poll::Ready(_) => {
self.srv.signal(Signal::Int);
Poll::Ready(())
}
Poll::Pending => Poll::Pending,
}
#[cfg(unix)]
{
for s in &mut self.streams {
loop {
match s.poll() {
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
Ok(Async::NotReady) => break,
Ok(Async::Ready(Some(sig))) => self.srv.signal(sig),
}
for (sig, fut) in self.signals.iter_mut() {
if fut.as_mut().poll(cx).is_ready() {
let sig = *sig;
self.srv.signal(sig);
return Poll::Ready(());
}
}
Ok(Async::NotReady)
Poll::Pending
}
}
}
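
Signal handling now builds one future per UNIX signal from the `actix_rt::signal` re-exports used above. A standalone sketch of the same primitives outside the `Signals` future; the `#[actix_rt::main]` attribute is an assumption about actix-rt's macro feature, and this example is UNIX-only:

use actix_rt::signal::unix::{signal, SignalKind};

#[actix_rt::main]
async fn main() -> std::io::Result<()> {
    // an error here means the signal handler could not be installed
    let mut term = signal(SignalKind::terminate())?;
    // resolves once SIGTERM is delivered to the process
    term.recv().await;
    println!("received SIGTERM, shutting down");
    Ok(())
}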


@@ -1,136 +1,91 @@
use std::{fmt, io, net};
pub(crate) use std::net::{
SocketAddr as StdSocketAddr, TcpListener as StdTcpListener, ToSocketAddrs,
};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_reactor::Handle;
use tokio_tcp::TcpStream;
pub(crate) use mio::net::{TcpListener as MioTcpListener, TcpSocket as MioTcpSocket};
#[cfg(unix)]
pub(crate) use {
mio::net::UnixListener as MioUnixListener,
std::os::unix::net::UnixListener as StdUnixListener,
};
pub(crate) enum StdListener {
Tcp(net::TcpListener),
#[cfg(all(unix, feature = "uds"))]
Uds(std::os::unix::net::UnixListener),
use std::{fmt, io};
use actix_rt::net::TcpStream;
use mio::event::Source;
use mio::net::TcpStream as MioTcpStream;
use mio::{Interest, Registry, Token};
#[cfg(windows)]
use std::os::windows::io::{FromRawSocket, IntoRawSocket};
#[cfg(unix)]
use {
actix_rt::net::UnixStream,
mio::net::{SocketAddr as MioSocketAddr, UnixStream as MioUnixStream},
std::os::unix::io::{FromRawFd, IntoRawFd},
};
pub(crate) enum MioListener {
Tcp(MioTcpListener),
#[cfg(unix)]
Uds(MioUnixListener),
}
pub(crate) enum SocketAddr {
Tcp(net::SocketAddr),
#[cfg(all(unix, feature = "uds"))]
Uds(std::os::unix::net::SocketAddr),
}
impl fmt::Display for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
SocketAddr::Tcp(ref addr) => write!(f, "{}", addr),
#[cfg(all(unix, feature = "uds"))]
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
impl fmt::Debug for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
SocketAddr::Tcp(ref addr) => write!(f, "{:?}", addr),
#[cfg(all(unix, feature = "uds"))]
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
impl fmt::Display for StdListener {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
StdListener::Tcp(ref lst) => write!(f, "{}", lst.local_addr().ok().unwrap()),
#[cfg(all(unix, feature = "uds"))]
StdListener::Uds(ref lst) => write!(f, "{:?}", lst.local_addr().ok().unwrap()),
}
}
}
impl StdListener {
impl MioListener {
pub(crate) fn local_addr(&self) -> SocketAddr {
match self {
StdListener::Tcp(lst) => SocketAddr::Tcp(lst.local_addr().unwrap()),
#[cfg(all(unix, feature = "uds"))]
StdListener::Uds(lst) => SocketAddr::Uds(lst.local_addr().unwrap()),
}
}
pub(crate) fn into_listener(self) -> SocketListener {
match self {
StdListener::Tcp(lst) => SocketListener::Tcp(
mio::net::TcpListener::from_std(lst)
.expect("Can not create mio::net::TcpListener"),
),
#[cfg(all(unix, feature = "uds"))]
StdListener::Uds(lst) => SocketListener::Uds(
mio_uds::UnixListener::from_listener(lst)
.expect("Can not create mio_uds::UnixListener"),
),
}
}
}
#[derive(Debug)]
pub enum StdStream {
Tcp(std::net::TcpStream),
#[cfg(all(unix, feature = "uds"))]
Uds(std::os::unix::net::UnixStream),
}
pub(crate) enum SocketListener {
Tcp(mio::net::TcpListener),
#[cfg(all(unix, feature = "uds"))]
Uds(mio_uds::UnixListener),
}
impl SocketListener {
pub(crate) fn accept(&self) -> io::Result<Option<(StdStream, SocketAddr)>> {
match *self {
SocketListener::Tcp(ref lst) => lst
.accept_std()
.map(|(stream, addr)| Some((StdStream::Tcp(stream), SocketAddr::Tcp(addr)))),
#[cfg(all(unix, feature = "uds"))]
SocketListener::Uds(ref lst) => lst.accept_std().map(|res| {
res.map(|(stream, addr)| (StdStream::Uds(stream), SocketAddr::Uds(addr)))
}),
MioListener::Tcp(ref lst) => SocketAddr::Tcp(lst.local_addr().unwrap()),
#[cfg(unix)]
MioListener::Uds(ref lst) => SocketAddr::Uds(lst.local_addr().unwrap()),
}
}
pub(crate) fn accept(&self) -> io::Result<Option<(MioStream, SocketAddr)>> {
match *self {
MioListener::Tcp(ref lst) => lst
.accept()
.map(|(stream, addr)| Some((MioStream::Tcp(stream), SocketAddr::Tcp(addr)))),
#[cfg(unix)]
MioListener::Uds(ref lst) => lst
.accept()
.map(|(stream, addr)| Some((MioStream::Uds(stream), SocketAddr::Uds(addr)))),
}
}
}
impl mio::Evented for SocketListener {
impl Source for MioListener {
fn register(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
&mut self,
registry: &Registry,
token: Token,
interests: Interest,
) -> io::Result<()> {
match *self {
SocketListener::Tcp(ref lst) => lst.register(poll, token, interest, opts),
#[cfg(all(unix, feature = "uds"))]
SocketListener::Uds(ref lst) => lst.register(poll, token, interest, opts),
MioListener::Tcp(ref mut lst) => lst.register(registry, token, interests),
#[cfg(unix)]
MioListener::Uds(ref mut lst) => lst.register(registry, token, interests),
}
}
fn reregister(
&self,
poll: &mio::Poll,
token: mio::Token,
interest: mio::Ready,
opts: mio::PollOpt,
&mut self,
registry: &Registry,
token: Token,
interests: Interest,
) -> io::Result<()> {
match *self {
SocketListener::Tcp(ref lst) => lst.reregister(poll, token, interest, opts),
#[cfg(all(unix, feature = "uds"))]
SocketListener::Uds(ref lst) => lst.reregister(poll, token, interest, opts),
MioListener::Tcp(ref mut lst) => lst.reregister(registry, token, interests),
#[cfg(unix)]
MioListener::Uds(ref mut lst) => lst.reregister(registry, token, interests),
}
}
fn deregister(&self, poll: &mio::Poll) -> io::Result<()> {
fn deregister(&mut self, registry: &Registry) -> io::Result<()> {
match *self {
SocketListener::Tcp(ref lst) => lst.deregister(poll),
#[cfg(all(unix, feature = "uds"))]
SocketListener::Uds(ref lst) => {
let res = lst.deregister(poll);
MioListener::Tcp(ref mut lst) => lst.deregister(registry),
#[cfg(unix)]
MioListener::Uds(ref mut lst) => {
let res = lst.deregister(registry);
// cleanup file path
if let Ok(addr) = lst.local_addr() {
@@ -144,30 +99,156 @@ impl mio::Evented for SocketListener {
}
}
pub trait FromStream: AsyncRead + AsyncWrite + Sized {
fn from_stdstream(sock: StdStream) -> io::Result<Self>;
impl From<StdTcpListener> for MioListener {
fn from(lst: StdTcpListener) -> Self {
MioListener::Tcp(MioTcpListener::from_std(lst))
}
}
#[cfg(unix)]
impl From<StdUnixListener> for MioListener {
fn from(lst: StdUnixListener) -> Self {
MioListener::Uds(MioUnixListener::from_std(lst))
}
}
impl fmt::Debug for MioListener {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
MioListener::Tcp(ref lst) => write!(f, "{:?}", lst),
#[cfg(all(unix))]
MioListener::Uds(ref lst) => write!(f, "{:?}", lst),
}
}
}
impl fmt::Display for MioListener {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
MioListener::Tcp(ref lst) => write!(f, "{}", lst.local_addr().ok().unwrap()),
#[cfg(unix)]
MioListener::Uds(ref lst) => write!(f, "{:?}", lst.local_addr().ok().unwrap()),
}
}
}
pub(crate) enum SocketAddr {
Tcp(StdSocketAddr),
#[cfg(unix)]
Uds(MioSocketAddr),
}
impl fmt::Display for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
SocketAddr::Tcp(ref addr) => write!(f, "{}", addr),
#[cfg(unix)]
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
impl fmt::Debug for SocketAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match *self {
SocketAddr::Tcp(ref addr) => write!(f, "{:?}", addr),
#[cfg(unix)]
SocketAddr::Uds(ref addr) => write!(f, "{:?}", addr),
}
}
}
#[derive(Debug)]
pub enum MioStream {
Tcp(MioTcpStream),
#[cfg(unix)]
Uds(MioUnixStream),
}
/// helper trait for converting mio stream to tokio stream.
pub trait FromStream: Sized {
fn from_mio(sock: MioStream) -> io::Result<Self>;
}
// FIXME: This is a workaround; we need an efficient way to convert between mio and tokio streams
#[cfg(unix)]
impl FromStream for TcpStream {
fn from_stdstream(sock: StdStream) -> io::Result<Self> {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
StdStream::Tcp(stream) => TcpStream::from_std(stream, &Handle::default()),
#[cfg(all(unix, feature = "uds"))]
StdStream::Uds(_) => {
MioStream::Tcp(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
TcpStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
MioStream::Uds(_) => {
panic!("Should not happen, bug in server impl");
}
}
}
}
#[cfg(all(unix, feature = "uds"))]
impl FromStream for tokio_uds::UnixStream {
fn from_stdstream(sock: StdStream) -> io::Result<Self> {
// FIXME: This is a workaround; we need an efficient way to convert between mio and tokio streams
#[cfg(windows)]
impl FromStream for TcpStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
StdStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
StdStream::Uds(stream) => {
tokio_uds::UnixStream::from_std(stream, &Handle::default())
MioStream::Tcp(mio) => {
let raw = IntoRawSocket::into_raw_socket(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
TcpStream::from_std(unsafe { FromRawSocket::from_raw_socket(raw) })
}
}
}
}
// FIXME: This is a workaround; we need an efficient way to convert between mio and tokio streams
#[cfg(unix)]
impl FromStream for UnixStream {
fn from_mio(sock: MioStream) -> io::Result<Self> {
match sock {
MioStream::Tcp(_) => panic!("Should not happen, bug in server impl"),
MioStream::Uds(mio) => {
let raw = IntoRawFd::into_raw_fd(mio);
// SAFETY: This is an in-place conversion from a mio stream to a tokio stream.
UnixStream::from_std(unsafe { FromRawFd::from_raw_fd(raw) })
}
}
}
}
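// Illustrative sketch (not part of the diff): how a caller generic over `FromStream` could
// turn an accepted `MioStream` into the tokio-based stream type a service expects. The
// `handle_connection` helper and its `FnMut` "service" are hypothetical placeholders.
#[allow(dead_code)]
fn handle_connection<Io, S>(stream: MioStream, service: &mut S) -> io::Result<()>
where
    Io: FromStream,
    S: FnMut(Io),
{
    // convert the mio stream into the runtime's stream type (TcpStream or UnixStream)
    let io = Io::from_mio(stream)?;
    service(io);
    Ok(())
}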
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn socket_addr() {
let addr = SocketAddr::Tcp("127.0.0.1:8080".parse().unwrap());
assert!(format!("{:?}", addr).contains("127.0.0.1:8080"));
assert_eq!(format!("{}", addr), "127.0.0.1:8080");
let addr: StdSocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = MioTcpSocket::new_v4().unwrap();
socket.set_reuseaddr(true).unwrap();
socket.bind(addr).unwrap();
let tcp = socket.listen(128).unwrap();
let lst = MioListener::Tcp(tcp);
assert!(format!("{:?}", lst).contains("TcpListener"));
assert!(format!("{}", lst).contains("127.0.0.1"));
}
#[test]
#[cfg(unix)]
fn uds() {
let _ = std::fs::remove_file("/tmp/sock.xxxxx");
if let Ok(socket) = MioUnixListener::bind("/tmp/sock.xxxxx") {
let addr = socket.local_addr().expect("Couldn't get local address");
let a = SocketAddr::Uds(addr);
assert!(format!("{:?}", a).contains("/tmp/sock.xxxxx"));
assert!(format!("{}", a).contains("/tmp/sock.xxxxx"));
let lst = MioListener::Uds(socket);
assert!(format!("{:?}", lst).contains("/tmp/sock.xxxxx"));
assert!(format!("{}", lst).contains("/tmp/sock.xxxxx"));
}
}
}


@@ -1,42 +0,0 @@
//! SSL Services
use std::sync::atomic::{AtomicUsize, Ordering};
use crate::counter::Counter;
#[cfg(feature = "ssl")]
mod openssl;
#[cfg(feature = "ssl")]
pub use self::openssl::OpensslAcceptor;
#[cfg(feature = "tls")]
mod nativetls;
#[cfg(feature = "tls")]
pub use self::nativetls::{NativeTlsAcceptor, TlsStream};
#[cfg(feature = "rust-tls")]
mod rustls;
#[cfg(feature = "rust-tls")]
pub use self::rustls::RustlsAcceptor;
/// Sets the maximum number of concurrent SSL connection handshakes per worker.
///
/// All listeners will stop accepting connections when this limit is
/// reached. It can be used to limit the global SSL CPU usage.
///
/// By default, the limit is set to 256.
pub fn max_concurrent_ssl_connect(num: usize) {
MAX_CONN.store(num, Ordering::Relaxed);
}
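// Illustrative sketch (not part of the diff): raising the handshake limit at startup. The
// wrapper function is hypothetical; only `max_concurrent_ssl_connect` comes from this module.
#[allow(dead_code)]
fn raise_tls_handshake_limit() {
    // allow up to 512 concurrent TLS handshakes per worker instead of the default 256
    max_concurrent_ssl_connect(512);
}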
pub(crate) static MAX_CONN: AtomicUsize = AtomicUsize::new(256);
thread_local! {
static MAX_CONN_COUNTER: Counter = Counter::new(MAX_CONN.load(Ordering::Relaxed));
}
/// SSL error combined with service error.
#[derive(Debug)]
pub enum SslError<E1, E2> {
Ssl(E1),
Service(E2),
}


@@ -1,182 +0,0 @@
use std::io;
use std::marker::PhantomData;
use actix_service::{NewService, Service};
use futures::{future::ok, future::FutureResult, Async, Future, Poll};
use native_tls::{self, Error, HandshakeError, TlsAcceptor};
use tokio_io::{AsyncRead, AsyncWrite};
use crate::counter::{Counter, CounterGuard};
use crate::ssl::MAX_CONN_COUNTER;
use crate::{Io, Protocol, ServerConfig};
/// Support `SSL` connections via the native-tls package
///
/// The `tls` feature enables the `NativeTlsAcceptor` type
pub struct NativeTlsAcceptor<T, P = ()> {
acceptor: TlsAcceptor,
io: PhantomData<(T, P)>,
}
impl<T: AsyncRead + AsyncWrite, P> NativeTlsAcceptor<T, P> {
/// Create `NativeTlsAcceptor` instance
pub fn new(acceptor: TlsAcceptor) -> Self {
NativeTlsAcceptor {
acceptor,
io: PhantomData,
}
}
}
impl<T: AsyncRead + AsyncWrite, P> Clone for NativeTlsAcceptor<T, P> {
fn clone(&self) -> Self {
Self {
acceptor: self.acceptor.clone(),
io: PhantomData,
}
}
}
impl<T: AsyncRead + AsyncWrite, P> NewService for NativeTlsAcceptor<T, P> {
type Request = Io<T, P>;
type Response = Io<TlsStream<T>, P>;
type Error = Error;
type Config = ServerConfig;
type Service = NativeTlsAcceptorService<T, P>;
type InitError = ();
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, cfg: &ServerConfig) -> Self::Future {
cfg.set_secure();
MAX_CONN_COUNTER.with(|conns| {
ok(NativeTlsAcceptorService {
acceptor: self.acceptor.clone(),
conns: conns.clone(),
io: PhantomData,
})
})
}
}
pub struct NativeTlsAcceptorService<T, P> {
acceptor: TlsAcceptor,
io: PhantomData<(T, P)>,
conns: Counter,
}
impl<T: AsyncRead + AsyncWrite, P> Service for NativeTlsAcceptorService<T, P> {
type Request = Io<T, P>;
type Response = Io<TlsStream<T>, P>;
type Error = Error;
type Future = Accept<T, P>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
if self.conns.available() {
Ok(Async::Ready(()))
} else {
Ok(Async::NotReady)
}
}
fn call(&mut self, req: Self::Request) -> Self::Future {
let (io, params, _) = req.into_parts();
Accept {
_guard: self.conns.get(),
inner: Some(self.acceptor.accept(io)),
params: Some(params),
}
}
}
/// A wrapper around an underlying raw stream which implements the TLS or SSL
/// protocol.
///
/// A `TlsStream<S>` represents a handshake that has been completed successfully
/// and both the server and the client are ready for receiving and sending
/// data. Bytes read from a `TlsStream` are decrypted from `S` and bytes written
/// to a `TlsStream` are encrypted when passing through to `S`.
#[derive(Debug)]
pub struct TlsStream<S> {
inner: native_tls::TlsStream<S>,
}
/// Future returned from `NativeTlsAcceptor::accept` which will resolve
/// once the accept handshake has finished.
pub struct Accept<S, P> {
inner: Option<Result<native_tls::TlsStream<S>, HandshakeError<S>>>,
params: Option<P>,
_guard: CounterGuard,
}
impl<T: AsyncRead + AsyncWrite, P> Future for Accept<T, P> {
type Item = Io<TlsStream<T>, P>;
type Error = Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
match self.inner.take().expect("cannot poll MidHandshake twice") {
Ok(stream) => Ok(Async::Ready(Io::from_parts(
TlsStream { inner: stream },
self.params.take().unwrap(),
Protocol::Unknown,
))),
Err(HandshakeError::Failure(e)) => Err(e),
Err(HandshakeError::WouldBlock(s)) => match s.handshake() {
Ok(stream) => Ok(Async::Ready(Io::from_parts(
TlsStream { inner: stream },
self.params.take().unwrap(),
Protocol::Unknown,
))),
Err(HandshakeError::Failure(e)) => Err(e),
Err(HandshakeError::WouldBlock(s)) => {
self.inner = Some(Err(HandshakeError::WouldBlock(s)));
Ok(Async::NotReady)
}
},
}
}
}
impl<S> TlsStream<S> {
/// Get access to the internal `native_tls::TlsStream` stream which also
/// transitively allows access to `S`.
pub fn get_ref(&self) -> &native_tls::TlsStream<S> {
&self.inner
}
/// Get mutable access to the internal `native_tls::TlsStream` stream which
/// also transitively allows mutable access to `S`.
pub fn get_mut(&mut self) -> &mut native_tls::TlsStream<S> {
&mut self.inner
}
}
impl<S: io::Read + io::Write> io::Read for TlsStream<S> {
fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> {
self.inner.read(buf)
}
}
impl<S: io::Read + io::Write> io::Write for TlsStream<S> {
fn write(&mut self, buf: &[u8]) -> io::Result<usize> {
self.inner.write(buf)
}
fn flush(&mut self) -> io::Result<()> {
self.inner.flush()
}
}
impl<S: AsyncRead + AsyncWrite> AsyncRead for TlsStream<S> {}
impl<S: AsyncRead + AsyncWrite> AsyncWrite for TlsStream<S> {
fn shutdown(&mut self) -> Poll<(), io::Error> {
match self.inner.shutdown() {
Ok(_) => (),
Err(ref e) if e.kind() == io::ErrorKind::WouldBlock => (),
Err(e) => return Err(e),
}
self.inner.get_mut().shutdown()
}
}


@@ -1,130 +0,0 @@
use std::marker::PhantomData;
use actix_service::{NewService, Service};
use futures::{future::ok, future::FutureResult, Async, Future, Poll};
use openssl::ssl::{HandshakeError, SslAcceptor};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_openssl::{AcceptAsync, SslAcceptorExt, SslStream};
use crate::counter::{Counter, CounterGuard};
use crate::ssl::MAX_CONN_COUNTER;
use crate::{Io, Protocol, ServerConfig};
/// Support `SSL` connections via the openssl package
///
/// The `ssl` feature enables the `OpensslAcceptor` type
pub struct OpensslAcceptor<T: AsyncRead + AsyncWrite, P = ()> {
acceptor: SslAcceptor,
io: PhantomData<(T, P)>,
}
impl<T: AsyncRead + AsyncWrite, P> OpensslAcceptor<T, P> {
/// Create default `OpensslAcceptor`
pub fn new(acceptor: SslAcceptor) -> Self {
OpensslAcceptor {
acceptor,
io: PhantomData,
}
}
}
impl<T: AsyncRead + AsyncWrite, P> Clone for OpensslAcceptor<T, P> {
fn clone(&self) -> Self {
Self {
acceptor: self.acceptor.clone(),
io: PhantomData,
}
}
}
impl<T: AsyncRead + AsyncWrite, P> NewService for OpensslAcceptor<T, P> {
type Request = Io<T, P>;
type Response = Io<SslStream<T>, P>;
type Error = HandshakeError<T>;
type Config = ServerConfig;
type Service = OpensslAcceptorService<T, P>;
type InitError = ();
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, cfg: &ServerConfig) -> Self::Future {
cfg.set_secure();
MAX_CONN_COUNTER.with(|conns| {
ok(OpensslAcceptorService {
acceptor: self.acceptor.clone(),
conns: conns.clone(),
io: PhantomData,
})
})
}
}
pub struct OpensslAcceptorService<T, P> {
acceptor: SslAcceptor,
conns: Counter,
io: PhantomData<(T, P)>,
}
impl<T: AsyncRead + AsyncWrite, P> Service for OpensslAcceptorService<T, P> {
type Request = Io<T, P>;
type Response = Io<SslStream<T>, P>;
type Error = HandshakeError<T>;
type Future = OpensslAcceptorServiceFut<T, P>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
if self.conns.available() {
Ok(Async::Ready(()))
} else {
Ok(Async::NotReady)
}
}
fn call(&mut self, req: Self::Request) -> Self::Future {
let (io, params, _) = req.into_parts();
OpensslAcceptorServiceFut {
_guard: self.conns.get(),
fut: SslAcceptorExt::accept_async(&self.acceptor, io),
params: Some(params),
}
}
}
pub struct OpensslAcceptorServiceFut<T, P>
where
T: AsyncRead + AsyncWrite,
{
fut: AcceptAsync<T>,
params: Option<P>,
_guard: CounterGuard,
}
impl<T: AsyncRead + AsyncWrite, P> Future for OpensslAcceptorServiceFut<T, P> {
type Item = Io<SslStream<T>, P>;
type Error = HandshakeError<T>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let io = futures::try_ready!(self.fut.poll());
let proto = if let Some(protos) = io.get_ref().ssl().selected_alpn_protocol() {
const H2: &[u8] = b"\x02h2";
const HTTP10: &[u8] = b"\x08http/1.0";
const HTTP11: &[u8] = b"\x08http/1.1";
if protos.windows(3).any(|window| window == H2) {
Protocol::Http2
} else if protos.windows(9).any(|window| window == HTTP11) {
Protocol::Http11
} else if protos.windows(9).any(|window| window == HTTP10) {
Protocol::Http10
} else {
Protocol::Unknown
}
} else {
Protocol::Unknown
};
Ok(Async::Ready(Io::from_parts(
io,
self.params.take().unwrap(),
proto,
)))
}
}


@@ -1,116 +0,0 @@
use std::io;
use std::marker::PhantomData;
use std::sync::Arc;
use actix_service::{NewService, Service};
use futures::{future::ok, future::FutureResult, Async, Future, Poll};
use rustls::{ServerConfig, ServerSession};
use tokio_io::{AsyncRead, AsyncWrite};
use tokio_rustls::{Accept, TlsAcceptor, TlsStream};
use crate::counter::{Counter, CounterGuard};
use crate::ssl::MAX_CONN_COUNTER;
use crate::{Io, Protocol, ServerConfig as SrvConfig};
/// Support `SSL` connections via the rustls package
///
/// The `rust-tls` feature enables the `RustlsAcceptor` type
pub struct RustlsAcceptor<T, P = ()> {
config: Arc<ServerConfig>,
io: PhantomData<(T, P)>,
}
impl<T: AsyncRead + AsyncWrite, P> RustlsAcceptor<T, P> {
/// Create new `RustlsAcceptor` service
pub fn new(config: ServerConfig) -> Self {
RustlsAcceptor {
config: Arc::new(config),
io: PhantomData,
}
}
}
impl<T, P> Clone for RustlsAcceptor<T, P> {
fn clone(&self) -> Self {
Self {
config: self.config.clone(),
io: PhantomData,
}
}
}
impl<T: AsyncRead + AsyncWrite, P> NewService for RustlsAcceptor<T, P> {
type Request = Io<T, P>;
type Response = Io<TlsStream<T, ServerSession>, P>;
type Error = io::Error;
type Config = SrvConfig;
type Service = RustlsAcceptorService<T, P>;
type InitError = ();
type Future = FutureResult<Self::Service, Self::InitError>;
fn new_service(&self, cfg: &SrvConfig) -> Self::Future {
cfg.set_secure();
MAX_CONN_COUNTER.with(|conns| {
ok(RustlsAcceptorService {
acceptor: self.config.clone().into(),
conns: conns.clone(),
io: PhantomData,
})
})
}
}
pub struct RustlsAcceptorService<T, P> {
acceptor: TlsAcceptor,
io: PhantomData<(T, P)>,
conns: Counter,
}
impl<T: AsyncRead + AsyncWrite, P> Service for RustlsAcceptorService<T, P> {
type Request = Io<T, P>;
type Response = Io<TlsStream<T, ServerSession>, P>;
type Error = io::Error;
type Future = RustlsAcceptorServiceFut<T, P>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
if self.conns.available() {
Ok(Async::Ready(()))
} else {
Ok(Async::NotReady)
}
}
fn call(&mut self, req: Self::Request) -> Self::Future {
let (io, params, _) = req.into_parts();
RustlsAcceptorServiceFut {
_guard: self.conns.get(),
fut: self.acceptor.accept(io),
params: Some(params),
}
}
}
pub struct RustlsAcceptorServiceFut<T, P>
where
T: AsyncRead + AsyncWrite,
{
fut: Accept<T>,
params: Option<P>,
_guard: CounterGuard,
}
impl<T: AsyncRead + AsyncWrite, P> Future for RustlsAcceptorServiceFut<T, P> {
type Item = Io<TlsStream<T, ServerSession>, P>;
type Error = io::Error;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
let io = futures::try_ready!(self.fut.poll());
Ok(Async::Ready(Io::from_parts(
io,
self.params.take().unwrap(),
Protocol::Unknown,
)))
}
}


@@ -0,0 +1,144 @@
use std::sync::mpsc;
use std::{net, thread};
use actix_rt::{net::TcpStream, System};
use crate::{Server, ServerBuilder, ServiceFactory};
/// The `TestServer` type.
///
/// `TestServer` is a very simple test server that simplifies the process of writing
/// integration tests for actix-net applications.
///
/// # Examples
///
/// ```
/// use actix_service::fn_service;
/// use actix_server::TestServer;
///
/// #[actix_rt::main]
/// async fn main() {
/// let srv = TestServer::with(|| fn_service(
/// |sock| async move {
/// println!("New connection: {:?}", sock);
/// Ok::<_, ()>(())
/// }
/// ));
///
/// println!("SOCKET: {:?}", srv.connect());
/// }
/// ```
pub struct TestServer;
/// Test server runtime
pub struct TestServerRuntime {
addr: net::SocketAddr,
host: String,
port: u16,
system: System,
}
impl TestServer {
/// Start new server with server builder
pub fn start<F>(mut factory: F) -> TestServerRuntime
where
F: FnMut(ServerBuilder) -> ServerBuilder + Send + 'static,
{
let (tx, rx) = mpsc::channel();
// run server in separate thread
thread::spawn(move || {
let sys = System::new("actix-test-server");
factory(Server::build()).workers(1).disable_signals().run();
tx.send(System::current()).unwrap();
sys.run()
});
let system = rx.recv().unwrap();
TestServerRuntime {
system,
addr: "127.0.0.1:0".parse().unwrap(),
host: "127.0.0.1".to_string(),
port: 0,
}
}
/// Start new test server with application factory
pub fn with<F: ServiceFactory<TcpStream>>(factory: F) -> TestServerRuntime {
let (tx, rx) = mpsc::channel();
// run server in separate thread
thread::spawn(move || {
let sys = System::new("actix-test-server");
let tcp = net::TcpListener::bind("127.0.0.1:0").unwrap();
let local_addr = tcp.local_addr().unwrap();
sys.block_on(async {
Server::build()
.listen("test", tcp, factory)
.unwrap()
.workers(1)
.disable_signals()
.run();
tx.send((System::current(), local_addr)).unwrap();
});
sys.run()
});
let (system, addr) = rx.recv().unwrap();
let host = format!("{}", addr.ip());
let port = addr.port();
TestServerRuntime {
system,
addr,
host,
port,
}
}
/// Get first available unused local address
pub fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = mio::net::TcpSocket::new_v4().unwrap();
socket.bind(addr).unwrap();
socket.set_reuseaddr(true).unwrap();
let tcp = socket.listen(1024).unwrap();
tcp.local_addr().unwrap()
}
}
impl TestServerRuntime {
/// Test server host
pub fn host(&self) -> &str {
&self.host
}
/// Test server port
pub fn port(&self) -> u16 {
self.port
}
/// Get test server address
pub fn addr(&self) -> net::SocketAddr {
self.addr
}
/// Stop http server
fn stop(&mut self) {
self.system.stop();
}
/// Connect to server, return tokio TcpStream
pub fn connect(&self) -> std::io::Result<TcpStream> {
TcpStream::from_std(net::TcpStream::connect(self.addr)?)
}
}
impl Drop for TestServerRuntime {
fn drop(&mut self) {
self.stop()
}
}
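// Illustrative sketch (not part of the diff): exercising `TestServer::start` with a builder
// closure, mirroring the doc example on `TestServer` above. The service body, the "smoke"
// bind name, and the use of `unused_addr` here are placeholder assumptions.
#[cfg(test)]
#[test]
fn start_with_builder_sketch() {
    use actix_service::fn_service;

    let addr = TestServer::unused_addr();
    let srv = TestServer::start(move |builder| {
        builder
            .bind("smoke", addr, move || {
                fn_service(|_| async { Ok::<_, ()>(()) })
            })
            .unwrap()
    });

    assert!(net::TcpStream::connect(addr).is_ok());
    // TestServerRuntime stops the System when dropped
    drop(srv);
}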


@@ -0,0 +1,89 @@
use std::{
collections::VecDeque,
ops::Deref,
sync::{Arc, Mutex, MutexGuard},
};
use mio::{Registry, Token as MioToken, Waker};
use crate::worker::WorkerHandle;
/// waker token for `mio::Poll` instance
pub(crate) const WAKER_TOKEN: MioToken = MioToken(usize::MAX);
/// A `mio::Waker` paired with a queue of `WakerInterest`s for waking up `Accept`'s `Poll` and
/// telling it what to look into.
pub(crate) struct WakerQueue(Arc<(Waker, Mutex<VecDeque<WakerInterest>>)>);
impl Clone for WakerQueue {
fn clone(&self) -> Self {
Self(self.0.clone())
}
}
impl Deref for WakerQueue {
type Target = (Waker, Mutex<VecDeque<WakerInterest>>);
fn deref(&self) -> &Self::Target {
self.0.deref()
}
}
impl WakerQueue {
/// construct a waker queue with the given `Poll`'s `Registry`.
///
/// A fixed `WAKER_TOKEN` is used to identify the waker event; the `Poll` must match this token
/// for the queued `WakerInterest`s to be handled properly.
pub(crate) fn new(registry: &Registry) -> std::io::Result<Self> {
let waker = Waker::new(registry, WAKER_TOKEN)?;
let queue = Mutex::new(VecDeque::with_capacity(16));
Ok(Self(Arc::new((waker, queue))))
}
/// push a new interest to the queue and wake up the accept poll afterwards.
pub(crate) fn wake(&self, interest: WakerInterest) {
let (waker, queue) = self.deref();
queue
.lock()
.expect("Failed to lock WakerQueue")
.push_back(interest);
waker
.wake()
.unwrap_or_else(|e| panic!("can not wake up Accept Poll: {}", e));
}
/// get a MutexGuard of the waker queue.
pub(crate) fn guard(&self) -> MutexGuard<'_, VecDeque<WakerInterest>> {
self.deref().1.lock().expect("Failed to lock WakerQueue")
}
/// reset the waker queue so it does not grow infinitely.
pub(crate) fn reset(queue: &mut VecDeque<WakerInterest>) {
std::mem::swap(&mut VecDeque::<WakerInterest>::with_capacity(16), queue);
}
}
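// Illustrative sketch (not part of the diff): the producer side. A worker (or the server
// builder) clones the queue, pushes an interest, and wakes the accept poll in one call.
// The free function wrapper is a hypothetical placeholder.
#[allow(dead_code)]
fn notify_worker_available(waker: &WakerQueue) {
    // enqueues the interest, then wakes `Accept`'s `mio::Poll` via WAKER_TOKEN
    waker.wake(WakerInterest::WorkerAvailable);
}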
/// types of interest `Accept`'s `Poll` looks into when it is woken up by the waker.
///
/// These interests should not be confused with `mio::Interest`; they are mostly not I/O related.
pub(crate) enum WakerInterest {
/// `WorkerAvailable` is an interest from `Worker` notifying `Accept` that a worker is
/// available and can accept new tasks.
WorkerAvailable,
/// The `Pause`, `Resume`, and `Stop` interests come from the `ServerBuilder` future, which
/// listens for `ServerCommand` and notifies `Accept` to carry out exactly these tasks.
Pause,
Resume,
Stop,
/// `Timer` is an interest sent from a delayed future. When an error happens while accepting a
/// connection, `Accept` temporarily deregisters the socket listeners and wakes up the poll to
/// register them again after the delayed future resolves.
Timer,
/// `Worker` is an interest raised after a worker runs into a faulted state (determined by
/// whether work can be sent to it successfully). `Accept` is woken up to add the new
/// `WorkerHandle`.
Worker(WorkerHandle),
}
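// Illustrative sketch (not part of the diff): the consumer side. After `Poll` returns an event
// for WAKER_TOKEN, the accept loop drains the queue and dispatches each interest. The comments
// in each arm only summarise what accept.rs does; they are not the real handling code.
#[allow(dead_code)]
fn drain_waker_queue(waker: &WakerQueue) {
    let mut guard = waker.guard();
    while let Some(interest) = guard.pop_front() {
        match interest {
            WakerInterest::WorkerAvailable => { /* resume accepting if under backpressure */ }
            WakerInterest::Pause => { /* deregister all listeners */ }
            WakerInterest::Resume => { /* re-register all listeners */ }
            WakerInterest::Stop => { /* exit the accept loop */ }
            WakerInterest::Timer => { /* re-register listeners after an accept error delay */ }
            WakerInterest::Worker(_handle) => { /* replace the faulted worker's handle */ }
        }
    }
    // swap in a fresh queue so the buffer does not grow unboundedly
    WakerQueue::reset(&mut guard);
}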


@@ -1,19 +1,22 @@
use std::future::Future;
use std::pin::Pin;
use std::sync::atomic::{AtomicBool, AtomicUsize, Ordering};
use std::sync::Arc;
use std::{mem, time};
use std::task::{Context, Poll};
use std::time::Duration;
use actix_rt::time::{sleep_until, Instant, Sleep};
use actix_rt::{spawn, Arbiter};
use futures::sync::mpsc::{UnboundedReceiver, UnboundedSender};
use futures::sync::oneshot;
use futures::{future, Async, Future, Poll, Stream};
use actix_utils::counter::Counter;
use futures_core::future::LocalBoxFuture;
use log::{error, info, trace};
use tokio_timer::{sleep, Delay};
use tokio::sync::mpsc::{unbounded_channel, UnboundedReceiver, UnboundedSender};
use tokio::sync::oneshot;
use crate::accept::AcceptNotify;
use crate::counter::Counter;
use crate::services::{BoxedServerService, InternalServiceFactory, ServerMessage};
use crate::socket::{SocketAddr, StdStream};
use crate::Token;
use crate::service::{BoxedServerService, InternalServiceFactory};
use crate::socket::{MioStream, SocketAddr};
use crate::waker_queue::{WakerInterest, WakerQueue};
use crate::{join_all, Token};
pub(crate) struct WorkerCommand(Conn);
@@ -26,7 +29,7 @@ pub(crate) struct StopCommand {
#[derive(Debug)]
pub(crate) struct Conn {
pub io: StdStream,
pub io: MioStream,
pub token: Token,
pub peer: Option<SocketAddr>,
}
@@ -43,31 +46,33 @@ pub fn max_concurrent_connections(num: usize) {
MAX_CONNS.store(num, Ordering::Relaxed);
}
pub(crate) fn num_connections() -> usize {
MAX_CONNS_COUNTER.with(|conns| conns.total())
}
thread_local! {
static MAX_CONNS_COUNTER: Counter =
Counter::new(MAX_CONNS.load(Ordering::Relaxed));
}
pub(crate) fn num_connections() -> usize {
MAX_CONNS_COUNTER.with(|conns| conns.total())
}
// a handle to a worker that can send messages to the worker and share the worker's
// availability with other threads.
#[derive(Clone)]
pub(crate) struct WorkerClient {
pub(crate) struct WorkerHandle {
pub idx: usize,
tx1: UnboundedSender<WorkerCommand>,
tx2: UnboundedSender<StopCommand>,
avail: WorkerAvailability,
}
impl WorkerClient {
impl WorkerHandle {
pub fn new(
idx: usize,
tx1: UnboundedSender<WorkerCommand>,
tx2: UnboundedSender<StopCommand>,
avail: WorkerAvailability,
) -> Self {
WorkerClient {
WorkerHandle {
idx,
tx1,
tx2,
@@ -76,9 +81,7 @@ impl WorkerClient {
}
pub fn send(&self, msg: Conn) -> Result<(), Conn> {
self.tx1
.unbounded_send(WorkerCommand(msg))
.map_err(|msg| msg.into_inner().0)
self.tx1.send(WorkerCommand(msg)).map_err(|msg| msg.0 .0)
}
pub fn available(&self) -> bool {
@@ -87,21 +90,21 @@ impl WorkerClient {
pub fn stop(&self, graceful: bool) -> oneshot::Receiver<bool> {
let (result, rx) = oneshot::channel();
let _ = self.tx2.unbounded_send(StopCommand { graceful, result });
let _ = self.tx2.send(StopCommand { graceful, result });
rx
}
}
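// Illustrative sketch (not part of the diff): how the accept side can use a `WorkerHandle`.
// `send` hands a connection to the worker and gives it back on failure (a faulted worker),
// so the caller can retry elsewhere; the availability check and return shape are placeholders.
#[allow(dead_code)]
fn try_dispatch(handle: &WorkerHandle, conn: Conn) -> Option<Conn> {
    if handle.available() {
        // Err(conn) means the worker's channel is closed; return the Conn for a retry
        handle.send(conn).err()
    } else {
        Some(conn)
    }
}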
#[derive(Clone)]
pub(crate) struct WorkerAvailability {
notify: AcceptNotify,
waker: WakerQueue,
available: Arc<AtomicBool>,
}
impl WorkerAvailability {
pub fn new(notify: AcceptNotify) -> Self {
pub fn new(waker: WakerQueue) -> Self {
WorkerAvailability {
notify,
waker,
available: Arc::new(AtomicBool::new(false)),
}
}
@@ -112,8 +115,9 @@ impl WorkerAvailability {
pub fn set(&self, val: bool) {
let old = self.available.swap(val, Ordering::Release);
// notify accept when the worker switches to available.
if !old && val {
self.notify.notify()
self.waker.wake(WakerInterest::WorkerAvailable);
}
}
}
@@ -125,100 +129,154 @@ impl WorkerAvailability {
pub(crate) struct Worker {
rx: UnboundedReceiver<WorkerCommand>,
rx2: UnboundedReceiver<StopCommand>,
services: Vec<Option<(usize, BoxedServerService)>>,
services: Vec<WorkerService>,
availability: WorkerAvailability,
conns: Counter,
factories: Vec<Box<dyn InternalServiceFactory>>,
state: WorkerState,
shutdown_timeout: time::Duration,
shutdown_timeout: Duration,
}
struct WorkerService {
factory: usize,
status: WorkerServiceStatus,
service: BoxedServerService,
}
impl WorkerService {
fn created(&mut self, service: BoxedServerService) {
self.service = service;
self.status = WorkerServiceStatus::Unavailable;
}
}
#[derive(Copy, Clone, Debug, PartialEq, Eq)]
enum WorkerServiceStatus {
Available,
Unavailable,
Failed,
Restarting,
Stopping,
Stopped,
}
impl Worker {
pub(crate) fn start(
rx: UnboundedReceiver<WorkerCommand>,
rx2: UnboundedReceiver<StopCommand>,
idx: usize,
factories: Vec<Box<dyn InternalServiceFactory>>,
availability: WorkerAvailability,
shutdown_timeout: time::Duration,
) {
availability.set(false);
let mut wrk = MAX_CONNS_COUNTER.with(|conns| Worker {
rx,
rx2,
availability,
factories,
shutdown_timeout,
services: Vec::new(),
conns: conns.clone(),
state: WorkerState::Unavailable(Vec::new()),
});
shutdown_timeout: Duration,
) -> WorkerHandle {
let (tx1, rx) = unbounded_channel();
let (tx2, rx2) = unbounded_channel();
let avail = availability.clone();
let mut fut = Vec::new();
for (idx, factory) in wrk.factories.iter().enumerate() {
fut.push(factory.create().map(move |res| {
res.into_iter()
.map(|(t, s)| (idx, t, s))
.collect::<Vec<_>>()
}));
}
spawn(
future::join_all(fut)
.map_err(|e| {
error!("Can not start worker: {:?}", e);
Arbiter::current().stop();
// every worker runs in its own arbiter.
Arbiter::new().send(Box::pin(async move {
availability.set(false);
let mut wrk = MAX_CONNS_COUNTER.with(move |conns| Worker {
rx,
rx2,
availability,
factories,
shutdown_timeout,
services: Vec::new(),
conns: conns.clone(),
state: WorkerState::Unavailable,
});
let fut = wrk
.factories
.iter()
.enumerate()
.map(|(idx, factory)| {
let fut = factory.create();
async move {
fut.await.map(|r| {
r.into_iter().map(|(t, s)| (idx, t, s)).collect::<Vec<_>>()
})
}
})
.and_then(move |services| {
for item in services {
for (idx, token, service) in item {
while token.0 >= wrk.services.len() {
wrk.services.push(None);
.collect::<Vec<_>>();
spawn(async move {
let res: Result<Vec<_>, _> = join_all(fut).await.into_iter().collect();
match res {
Ok(services) => {
for item in services {
for (factory, token, service) in item {
assert_eq!(token.0, wrk.services.len());
wrk.services.push(WorkerService {
factory,
service,
status: WorkerServiceStatus::Unavailable,
});
}
wrk.services[token.0] = Some((idx, service));
}
}
wrk
}),
);
Err(e) => {
error!("Can not start worker: {:?}", e);
Arbiter::current().stop();
}
}
wrk.await
});
}));
WorkerHandle::new(idx, tx1, tx2, avail)
}
fn shutdown(&mut self, force: bool) {
if force {
self.services.iter_mut().for_each(|h| {
if let Some(h) = h {
let _ = h.1.call((None, ServerMessage::ForceShutdown));
self.services.iter_mut().for_each(|srv| {
if srv.status == WorkerServiceStatus::Available {
srv.status = WorkerServiceStatus::Stopped;
}
});
} else {
let timeout = self.shutdown_timeout;
self.services.iter_mut().for_each(move |h| {
if let Some(h) = h {
let _ = h.1.call((None, ServerMessage::Shutdown(timeout)));
self.services.iter_mut().for_each(move |srv| {
if srv.status == WorkerServiceStatus::Available {
srv.status = WorkerServiceStatus::Stopping;
}
});
}
}
fn check_readiness(&mut self, trace: bool) -> Result<bool, (Token, usize)> {
let mut ready = self.conns.available();
fn check_readiness(&mut self, cx: &mut Context<'_>) -> Result<bool, (Token, usize)> {
let mut ready = self.conns.available(cx);
let mut failed = None;
for (token, service) in &mut self.services.iter_mut().enumerate() {
if let Some(service) = service {
match service.1.poll_ready() {
Ok(Async::Ready(_)) => {
if trace {
for (idx, srv) in self.services.iter_mut().enumerate() {
if srv.status == WorkerServiceStatus::Available
|| srv.status == WorkerServiceStatus::Unavailable
{
match srv.service.poll_ready(cx) {
Poll::Ready(Ok(_)) => {
if srv.status == WorkerServiceStatus::Unavailable {
trace!(
"Service {:?} is available",
self.factories[service.0].name(Token(token))
self.factories[srv.factory].name(Token(idx))
);
srv.status = WorkerServiceStatus::Available;
}
}
Ok(Async::NotReady) => ready = false,
Err(_) => {
Poll::Pending => {
ready = false;
if srv.status == WorkerServiceStatus::Available {
trace!(
"Service {:?} is unavailable",
self.factories[srv.factory].name(Token(idx))
);
srv.status = WorkerServiceStatus::Unavailable;
}
}
Poll::Ready(Err(_)) => {
error!(
"Service {:?} readiness check returned error, restarting",
self.factories[service.0].name(Token(token))
self.factories[srv.factory].name(Token(idx))
);
failed = Some((Token(token), service.0));
failed = Some((Token(idx), srv.factory));
srv.status = WorkerServiceStatus::Failed;
}
}
}
@@ -232,207 +290,158 @@ impl Worker {
}
enum WorkerState {
None,
Available,
Unavailable(Vec<Conn>),
Unavailable,
Restarting(
usize,
Token,
Box<dyn Future<Item = Vec<(Token, BoxedServerService)>, Error = ()>>,
LocalBoxFuture<'static, Result<Vec<(Token, BoxedServerService)>, ()>>,
),
Shutdown(
Pin<Box<Sleep>>,
Pin<Box<Sleep>>,
Option<oneshot::Sender<bool>>,
),
Shutdown(Delay, Delay, oneshot::Sender<bool>),
}
impl Future for Worker {
type Item = ();
type Error = ();
type Output = ();
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
// `StopWorker` message handler
if let Ok(Async::Ready(Some(StopCommand { graceful, result }))) = self.rx2.poll() {
if let Poll::Ready(Some(StopCommand { graceful, result })) =
Pin::new(&mut self.rx2).poll_recv(cx)
{
self.availability.set(false);
let num = num_connections();
if num == 0 {
info!("Shutting down worker, 0 connections");
let _ = result.send(true);
return Ok(Async::Ready(()));
return Poll::Ready(());
} else if graceful {
self.shutdown(false);
let num = num_connections();
if num != 0 {
info!("Graceful worker shutdown, {} connections", num);
self.state = WorkerState::Shutdown(
sleep(time::Duration::from_secs(1)),
sleep(self.shutdown_timeout),
result,
Box::pin(sleep_until(Instant::now() + Duration::from_secs(1))),
Box::pin(sleep_until(Instant::now() + self.shutdown_timeout)),
Some(result),
);
} else {
let _ = result.send(true);
return Ok(Async::Ready(()));
return Poll::Ready(());
}
} else {
info!("Force shutdown worker, {} connections", num);
self.shutdown(true);
let _ = result.send(false);
return Ok(Async::Ready(()));
return Poll::Ready(());
}
}
let state = mem::replace(&mut self.state, WorkerState::None);
match state {
WorkerState::Unavailable(mut conns) => {
match self.check_readiness(true) {
Ok(true) => {
self.state = WorkerState::Available;
// process requests from wait queue
while let Some(msg) = conns.pop() {
match self.check_readiness(false) {
Ok(true) => {
let guard = self.conns.get();
let _ = self.services[msg.token.0]
.as_mut()
.expect("actix net bug")
.1
.call((Some(guard), ServerMessage::Connect(msg.io)));
}
Ok(false) => {
trace!("Worker is unavailable");
self.state = WorkerState::Unavailable(conns);
return self.poll();
}
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.state = WorkerState::Restarting(
idx,
token,
self.factories[idx].create(),
);
return self.poll();
}
}
match self.state {
WorkerState::Unavailable => match self.check_readiness(cx) {
Ok(true) => {
self.state = WorkerState::Available;
self.availability.set(true);
self.poll(cx)
}
Ok(false) => Poll::Pending,
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.services[token.0].status = WorkerServiceStatus::Restarting;
self.state =
WorkerState::Restarting(idx, token, self.factories[idx].create());
self.poll(cx)
}
},
WorkerState::Restarting(idx, token, ref mut fut) => {
match fut.as_mut().poll(cx) {
Poll::Ready(Ok(item)) => {
// only interested in the first item?
if let Some((token, service)) = item.into_iter().next() {
trace!(
"Service {:?} has been restarted",
self.factories[idx].name(token)
);
self.services[token.0].created(service);
self.state = WorkerState::Unavailable;
return self.poll(cx);
}
self.availability.set(true);
return self.poll();
}
Poll::Ready(Err(_)) => {
panic!(
"Can not restart {:?} service",
self.factories[idx].name(token)
);
}
Poll::Pending => return Poll::Pending,
}
self.poll(cx)
}
WorkerState::Shutdown(ref mut t1, ref mut t2, ref mut tx) => {
let num = num_connections();
if num == 0 {
let _ = tx.take().unwrap().send(true);
Arbiter::current().stop();
return Poll::Ready(());
}
// check graceful timeout
if Pin::new(t2).poll(cx).is_ready() {
let _ = tx.take().unwrap().send(false);
self.shutdown(true);
Arbiter::current().stop();
return Poll::Ready(());
}
// sleep for 1 second and then check again
if t1.as_mut().poll(cx).is_ready() {
*t1 = Box::pin(sleep_until(Instant::now() + Duration::from_secs(1)));
let _ = t1.as_mut().poll(cx);
}
Poll::Pending
}
// actively poll stream and handle worker command
WorkerState::Available => loop {
match self.check_readiness(cx) {
Ok(true) => (),
Ok(false) => {
self.state = WorkerState::Unavailable(conns);
return Ok(Async::NotReady);
trace!("Worker is unavailable");
self.availability.set(false);
self.state = WorkerState::Unavailable;
return self.poll(cx);
}
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.availability.set(false);
self.services[token.0].status = WorkerServiceStatus::Restarting;
self.state =
WorkerState::Restarting(idx, token, self.factories[idx].create());
return self.poll();
}
}
}
WorkerState::Restarting(idx, token, mut fut) => {
match fut.poll() {
Ok(Async::Ready(item)) => {
for (token, service) in item {
trace!(
"Service {:?} has been restarted",
self.factories[idx].name(token)
);
self.services[token.0] = Some((idx, service));
self.state = WorkerState::Unavailable(Vec::new());
}
}
Ok(Async::NotReady) => {
self.state = WorkerState::Restarting(idx, token, fut);
return Ok(Async::NotReady);
}
Err(_) => {
panic!(
"Can not restart {:?} service",
self.factories[idx].name(token)
);
}
}
return self.poll();
}
WorkerState::Shutdown(mut t1, mut t2, tx) => {
let num = num_connections();
if num == 0 {
let _ = tx.send(true);
Arbiter::current().stop();
return Ok(Async::Ready(()));
}
// check graceful timeout
match t2.poll().unwrap() {
Async::NotReady => (),
Async::Ready(_) => {
self.shutdown(true);
let _ = tx.send(false);
Arbiter::current().stop();
return Ok(Async::Ready(()));
return self.poll(cx);
}
}
// sleep for 1 second and then check again
match t1.poll().unwrap() {
Async::NotReady => (),
Async::Ready(_) => {
t1 = sleep(time::Duration::from_secs(1));
let _ = t1.poll();
match Pin::new(&mut self.rx).poll_recv(cx) {
// handle incoming io stream
Poll::Ready(Some(WorkerCommand(msg))) => {
let guard = self.conns.get();
let _ = self.services[msg.token.0]
.service
.call((Some(guard), msg.io));
}
}
self.state = WorkerState::Shutdown(t1, t2, tx);
return Ok(Async::NotReady);
}
WorkerState::Available => {
loop {
match self.rx.poll() {
// handle incoming tcp stream
Ok(Async::Ready(Some(WorkerCommand(msg)))) => {
match self.check_readiness(false) {
Ok(true) => {
let guard = self.conns.get();
let _ = self.services[msg.token.0]
.as_mut()
.expect("actix-server bug")
.1
.call((Some(guard), ServerMessage::Connect(msg.io)));
continue;
}
Ok(false) => {
trace!("Worker is unavailable");
self.availability.set(false);
self.state = WorkerState::Unavailable(vec![msg]);
}
Err((token, idx)) => {
trace!(
"Service {:?} failed, restarting",
self.factories[idx].name(token)
);
self.availability.set(false);
self.state = WorkerState::Restarting(
idx,
token,
self.factories[idx].create(),
);
}
}
return self.poll();
}
Ok(Async::NotReady) => {
self.state = WorkerState::Available;
return Ok(Async::NotReady);
}
Ok(Async::Ready(None)) | Err(_) => return Ok(Async::Ready(())),
}
}
}
WorkerState::None => panic!(),
};
Poll::Pending => return Poll::Pending,
Poll::Ready(None) => return Poll::Ready(()),
};
},
}
}
}


@@ -1,21 +1,17 @@
use std::io::Read;
use std::sync::mpsc;
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use std::sync::{mpsc, Arc};
use std::{net, thread, time};
use actix_codec::{BytesCodec, Framed};
use actix_server::{Io, Server, ServerConfig};
use actix_service::{new_service_cfg, service_fn, IntoService};
use bytes::Bytes;
use futures::{Future, Sink};
use net2::TcpBuilder;
use tokio_tcp::TcpStream;
use actix_server::Server;
use actix_service::fn_service;
use futures_util::future::{lazy, ok};
fn unused_addr() -> net::SocketAddr {
let addr: net::SocketAddr = "127.0.0.1:0".parse().unwrap();
let socket = TcpBuilder::new_v4().unwrap();
socket.bind(&addr).unwrap();
socket.reuse_address(true).unwrap();
let tcp = socket.to_tcp_listener().unwrap();
let socket = mio::net::TcpSocket::new_v4().unwrap();
socket.bind(addr).unwrap();
socket.set_reuseaddr(true).unwrap();
let tcp = socket.listen(32).unwrap();
tcp.local_addr().unwrap()
}
@@ -26,15 +22,14 @@ fn test_bind() {
let h = thread::spawn(move || {
let sys = actix_rt::System::new("test");
let srv = Server::build()
.bind("test", addr, move || {
new_service_cfg(move |cfg: &ServerConfig| {
assert_eq!(cfg.local_addr(), addr);
Ok::<_, ()>((|_| Ok::<_, ()>(())).into_service())
})
})
.unwrap()
.start();
let srv = sys.block_on(lazy(|_| {
Server::build()
.workers(1)
.disable_signals()
.bind("test", addr, move || fn_service(|_| ok::<_, ()>(())))
.unwrap()
.run()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
@@ -42,27 +37,7 @@ fn test_bind() {
thread::sleep(time::Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
let _ = sys.stop();
let _ = h.join();
}
#[test]
fn test_bind_no_config() {
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let h = thread::spawn(move || {
let sys = actix_rt::System::new("test");
let srv = Server::build()
.bind("test", addr, move || service_fn(|_| Ok::<_, ()>(())))
.unwrap()
.start();
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (_, sys) = rx.recv().unwrap();
assert!(net::TcpStream::connect(addr).is_ok());
let _ = sys.stop();
sys.stop();
let _ = h.join();
}
@@ -74,58 +49,61 @@ fn test_listen() {
let h = thread::spawn(move || {
let sys = actix_rt::System::new("test");
let lst = net::TcpListener::bind(addr).unwrap();
let srv = Server::build()
.listen("test", lst, move || {
new_service_cfg(move |cfg: &ServerConfig| {
assert_eq!(cfg.local_addr(), addr);
Ok::<_, ()>((|_| Ok::<_, ()>(())).into_service())
})
})
.unwrap()
.start();
let _ = tx.send((srv, actix_rt::System::current()));
sys.block_on(async {
Server::build()
.disable_signals()
.workers(1)
.listen("test", lst, move || fn_service(|_| ok::<_, ()>(())))
.unwrap()
.run();
let _ = tx.send(actix_rt::System::current());
});
let _ = sys.run();
});
let (_, sys) = rx.recv().unwrap();
let sys = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500));
assert!(net::TcpStream::connect(addr).is_ok());
let _ = sys.stop();
sys.stop();
let _ = h.join();
}
#[test]
#[cfg(unix)]
fn test_start() {
use actix_codec::{BytesCodec, Framed};
use actix_rt::net::TcpStream;
use bytes::Bytes;
use futures_util::sink::SinkExt;
use std::io::Read;
let addr = unused_addr();
let (tx, rx) = mpsc::channel();
let h = thread::spawn(move || {
let sys = actix_rt::System::new("test");
let srv = Server::build()
.backlog(100)
.bind("test", addr, move || {
new_service_cfg(move |cfg: &ServerConfig| {
assert_eq!(cfg.local_addr(), addr);
Ok::<_, ()>(
(|io: Io<TcpStream>| {
Framed::new(io.into_parts().0, BytesCodec)
.send(Bytes::from_static(b"test"))
.then(|_| Ok::<_, ()>(()))
})
.into_service(),
)
let srv = sys.block_on(lazy(|_| {
Server::build()
.backlog(100)
.disable_signals()
.bind("test", addr, move || {
fn_service(|io: TcpStream| async move {
let mut f = Framed::new(io, BytesCodec);
f.send(Bytes::from_static(b"test")).await.unwrap();
Ok::<_, ()>(())
})
})
})
.unwrap()
.start();
.unwrap()
.run()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (srv, sys) = rx.recv().unwrap();
let mut buf = [0u8; 4];
let mut buf = [1u8; 4];
let mut conn = net::TcpStream::connect(addr).unwrap();
let _ = conn.read_exact(&mut buf);
assert_eq!(buf, b"test"[..]);
@@ -157,6 +135,56 @@ fn test_start() {
assert!(net::TcpStream::connect(addr).is_err());
thread::sleep(time::Duration::from_millis(100));
let _ = sys.stop();
sys.stop();
let _ = h.join();
}
#[test]
fn test_configure() {
let addr1 = unused_addr();
let addr2 = unused_addr();
let addr3 = unused_addr();
let (tx, rx) = mpsc::channel();
let num = Arc::new(AtomicUsize::new(0));
let num2 = num.clone();
let h = thread::spawn(move || {
let num = num2.clone();
let sys = actix_rt::System::new("test");
let srv = sys.block_on(lazy(|_| {
Server::build()
.disable_signals()
.configure(move |cfg| {
let num = num.clone();
let lst = net::TcpListener::bind(addr3).unwrap();
cfg.bind("addr1", addr1)
.unwrap()
.bind("addr2", addr2)
.unwrap()
.listen("addr3", lst)
.apply(move |rt| {
let num = num.clone();
rt.service("addr1", fn_service(|_| ok::<_, ()>(())));
rt.service("addr3", fn_service(|_| ok::<_, ()>(())));
rt.on_start(lazy(move |_| {
let _ = num.fetch_add(1, Relaxed);
}))
})
})
.unwrap()
.workers(1)
.run()
}));
let _ = tx.send((srv, actix_rt::System::current()));
let _ = sys.run();
});
let (_, sys) = rx.recv().unwrap();
thread::sleep(time::Duration::from_millis(500));
assert!(net::TcpStream::connect(addr1).is_ok());
assert!(net::TcpStream::connect(addr2).is_ok());
assert!(net::TcpStream::connect(addr3).is_ok());
assert_eq!(num.load(Relaxed), 1);
sys.stop();
let _ = h.join();
}


@@ -1,5 +1,122 @@
# Changes
## Unreleased - 2021-xx-xx
* The `forward_ready!` macro converts errors. [#246]
[#246]: https://github.com/actix/actix-net/pull/246
## 2.0.0-beta.2 - 2021-01-03
* Remove redundant type parameter from `map_config`.
## 2.0.0-beta.1 - 2020-12-28
* `Service`, other traits, and many type signatures now take the request type as a type
parameter instead of an associated type. [#232]
* Add `always_ready!` and `forward_ready!` macros. [#233]
* Crate is now `no_std`. [#233]
* Migrate pin projections to `pin-project-lite`. [#233]
* Remove `AndThenApplyFn` and Pipeline `and_then_apply_fn`. Use the
`.and_then(apply_fn(...))` construction. [#233]
* Move non-vital methods to `ServiceExt` and `ServiceFactoryExt` extension traits. [#235]
[#232]: https://github.com/actix/actix-net/pull/232
[#233]: https://github.com/actix/actix-net/pull/233
[#235]: https://github.com/actix/actix-net/pull/235
## 1.0.6 - 2020-08-09
### Fixed
* Removed unsound custom Cell implementation that allowed obtaining several mutable references to
the same data, which is undefined behavior in Rust and could lead to violations of memory safety. External code could obtain several mutable references to the same data through
service combinators. Attempts to acquire several mutable references to the same data will instead
result in a panic.
## [1.0.5] - 2020-01-16
### Fixed
* Fixed unsoundness in .and_then()/.then() service combinators
## [1.0.4] - 2020-01-15
### Fixed
* Revert 1.0.3 change
## [1.0.3] - 2020-01-15
### Fixed
* Fixed unsoundness in `AndThenService` impl
## [1.0.2] - 2020-01-08
### Added
* Add `into_service` helper function
## [1.0.1] - 2019-12-22
### Changed
* `map_config()` and `unit_config()` accept `IntoServiceFactory` type
## [1.0.0] - 2019-12-11
### Added
* Add Clone impl for Apply service
## [1.0.0-alpha.4] - 2019-12-08
### Changed
* Renamed `service_fn` to `fn_service`
* Renamed `factory_fn` to `fn_factory`
* Renamed `factory_fn_cfg` to `fn_factory_with_config`
## [1.0.0-alpha.3] - 2019-12-06
### Changed
* Add missing Clone impls
* Restore `Transform::map_init_err()` combinator
* Restore `Service/Factory::apply_fn()` in form of `Pipeline/Factory::and_then_apply_fn()`
* Optimize service combinators and futures memory layout
## [1.0.0-alpha.2] - 2019-12-02
### Changed
* Use owned config value for service factory
* Renamed BoxedNewService/BoxedService to BoxServiceFactory/BoxService
## [1.0.0-alpha.1] - 2019-11-25
### Changed
* Migrated to `std::future`
* `NewService` renamed to `ServiceFactory`
* Added `pipeline` and `pipeline_factory` function
## [0.4.2] - 2019-08-27
### Fixed


@@ -1,29 +1,28 @@
[package]
name = "actix-service"
version = "0.4.2"
authors = ["Nikolay Kim <fafhrd91@gmail.com>"]
description = "Actix Service"
keywords = ["network", "framework", "async", "futures"]
version = "2.0.0-beta.2"
authors = [
"Nikolay Kim <fafhrd91@gmail.com>",
"Rob Ede <robjtede@icloud.com>",
]
description = "Service trait and combinators for representing asynchronous request/response operations."
keywords = ["network", "framework", "async", "futures", "service"]
homepage = "https://actix.rs"
repository = "https://github.com/actix/actix-net.git"
documentation = "https://docs.rs/actix-service/"
documentation = "https://docs.rs/actix-service"
readme = "README.md"
categories = ["network-programming", "asynchronous"]
license = "MIT/Apache-2.0"
exclude = [".gitignore", ".travis.yml", ".cargo/config", "appveyor.yml"]
license = "MIT OR Apache-2.0"
edition = "2018"
workspace = ".."
[badges]
travis-ci = { repository = "actix/actix-service", branch = "master" }
appveyor = { repository = "actix/actix-net" }
codecov = { repository = "actix/actix-service", branch = "master", service = "github" }
[lib]
name = "actix_service"
path = "src/lib.rs"
[dependencies]
futures = "0.1.25"
futures-core = { version = "0.3.7", default-features = false }
pin-project-lite = "0.2"
[dev-dependencies]
actix-rt = "0.2"
actix-rt = "1.0.0"
futures-util = { version = "0.3.7", default-features = false }

actix-service/README.md Normal file

@@ -0,0 +1,7 @@
# actix-service
> Service trait and combinators for representing asynchronous request/response operations.
See documentation for detailed explanations of these components: [https://docs.rs/actix-service][docs].
[docs]: https://docs.rs/actix-service


@@ -1,197 +1,237 @@
use futures::{Async, Future, Poll};
use alloc::rc::Rc;
use core::{
cell::RefCell,
future::Future,
marker::PhantomData,
pin::Pin,
task::{Context, Poll},
};
use super::{IntoNewService, NewService, Service};
use crate::cell::Cell;
use pin_project_lite::pin_project;
use super::{Service, ServiceFactory};
/// Service for the `and_then` combinator, chaining a computation onto the end
/// of another service which completes successfully.
///
/// This is created by the `ServiceExt::and_then` method.
pub struct AndThen<A, B> {
a: A,
b: Cell<B>,
}
/// This is created by the `Pipeline::and_then` method.
pub(crate) struct AndThenService<A, B, Req>(Rc<RefCell<(A, B)>>, PhantomData<Req>);
impl<A, B> AndThen<A, B> {
impl<A, B, Req> AndThenService<A, B, Req> {
/// Create new `AndThen` combinator
pub fn new(a: A, b: B) -> Self
pub(crate) fn new(a: A, b: B) -> Self
where
A: Service,
B: Service<Request = A::Response, Error = A::Error>,
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
Self { a, b: Cell::new(b) }
Self(Rc::new(RefCell::new((a, b))), PhantomData)
}
}
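// Illustrative sketch (not part of the diff): what the combinator looks like from the caller's
// side. `pipeline` and `fn_service` are the crate's public constructors; the doubling and
// add-one services are placeholder examples.
#[cfg(test)]
#[test]
fn and_then_sketch_compiles() {
    use crate::{fn_service, pipeline};

    let double = fn_service(|n: u8| async move { Ok::<_, ()>(u16::from(n) * 2) });
    let add_one = fn_service(|n: u16| async move { Ok::<_, ()>(n + 1) });

    // `Pipeline::and_then` wires the two services together through `AndThenService`
    let _svc = pipeline(double).and_then(add_one);
}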
impl<A, B> Clone for AndThen<A, B>
where
A: Clone,
{
impl<A, B, Req> Clone for AndThenService<A, B, Req> {
fn clone(&self) -> Self {
AndThen {
a: self.a.clone(),
b: self.b.clone(),
}
AndThenService(self.0.clone(), PhantomData)
}
}
impl<A, B> Service for AndThen<A, B>
impl<A, B, Req> Service<Req> for AndThenService<A, B, Req>
where
A: Service,
B: Service<Request = A::Response, Error = A::Error>,
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
type Request = A::Request;
type Response = B::Response;
type Error = A::Error;
type Future = AndThenFuture<A, B>;
type Future = AndThenServiceResponse<A, B, Req>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
let not_ready = self.a.poll_ready()?.is_not_ready();
if self.b.get_mut().poll_ready()?.is_not_ready() || not_ready {
Ok(Async::NotReady)
fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
let mut srv = self.0.borrow_mut();
let not_ready = !srv.0.poll_ready(cx)?.is_ready();
if !srv.1.poll_ready(cx)?.is_ready() || not_ready {
Poll::Pending
} else {
Ok(Async::Ready(()))
Poll::Ready(Ok(()))
}
}
fn call(&mut self, req: A::Request) -> Self::Future {
AndThenFuture::new(self.a.call(req), self.b.clone())
}
}
pub struct AndThenFuture<A, B>
where
A: Service,
B: Service<Request = A::Response, Error = A::Error>,
{
b: Cell<B>,
fut_b: Option<B::Future>,
fut_a: Option<A::Future>,
}
impl<A, B> AndThenFuture<A, B>
where
A: Service,
B: Service<Request = A::Response, Error = A::Error>,
{
fn new(a: A::Future, b: Cell<B>) -> Self {
AndThenFuture {
b,
fut_a: Some(a),
fut_b: None,
fn call(&mut self, req: Req) -> Self::Future {
AndThenServiceResponse {
state: State::A {
fut: self.0.borrow_mut().0.call(req),
b: Some(self.0.clone()),
},
}
}
}
impl<A, B> Future for AndThenFuture<A, B>
pin_project! {
pub(crate) struct AndThenServiceResponse<A, B, Req>
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
#[pin]
state: State<A, B, Req>,
}
}
pin_project! {
#[project = StateProj]
enum State<A, B, Req>
where
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
A {
#[pin]
fut: A::Future,
b: Option<Rc<RefCell<(A, B)>>>,
},
B {
#[pin]
fut: B::Future,
},
Empty,
}
}
impl<A, B, Req> Future for AndThenServiceResponse<A, B, Req>
where
A: Service,
B: Service<Request = A::Response, Error = A::Error>,
A: Service<Req>,
B: Service<A::Response, Error = A::Error>,
{
type Item = B::Response;
type Error = A::Error;
type Output = Result<B::Response, A::Error>;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
if let Some(ref mut fut) = self.fut_b {
return fut.poll();
}
fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
let mut this = self.as_mut().project();
match self.fut_a.as_mut().expect("Bug in actix-service").poll() {
Ok(Async::Ready(resp)) => {
let _ = self.fut_a.take();
self.fut_b = Some(self.b.get_mut().call(resp));
self.poll()
match this.state.as_mut().project() {
StateProj::A { fut, b } => match fut.poll(cx)? {
Poll::Ready(res) => {
let b = b.take().unwrap();
this.state.set(State::Empty); // drop fut A
let fut = b.borrow_mut().1.call(res);
this.state.set(State::B { fut });
self.poll(cx)
}
Poll::Pending => Poll::Pending,
},
StateProj::B { fut } => fut.poll(cx).map(|r| {
this.state.set(State::Empty);
r
}),
StateProj::Empty => {
panic!("future must not be polled after it returned `Poll::Ready`")
}
Ok(Async::NotReady) => Ok(Async::NotReady),
Err(err) => Err(err),
}
}
}
/// `AndThenNewService` new service combinator
pub struct AndThenNewService<A, B>
/// `.and_then()` service factory combinator
pub(crate) struct AndThenServiceFactory<A, B, Req>
where
A: NewService,
B: NewService,
{
a: A,
b: B,
}
impl<A, B> AndThenNewService<A, B>
where
A: NewService,
B: NewService<
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Request = A::Response,
Error = A::Error,
InitError = A::InitError,
>,
{
/// Create new `AndThen` combinator
pub fn new<F: IntoNewService<B>>(a: A, f: F) -> Self {
inner: Rc<(A, B)>,
_phantom: PhantomData<Req>,
}
impl<A, B, Req> AndThenServiceFactory<A, B, Req>
where
A: ServiceFactory<Req>,
A::Config: Clone,
B: ServiceFactory<
A::Response,
Config = A::Config,
Error = A::Error,
InitError = A::InitError,
>,
{
/// Create new `AndThenFactory` combinator
pub(crate) fn new(a: A, b: B) -> Self {
Self {
a,
b: f.into_new_service(),
inner: Rc::new((a, b)),
_phantom: PhantomData,
}
}
}
impl<A, B, Req> ServiceFactory<Req> for AndThenServiceFactory<A, B, Req>
where
    A: ServiceFactory<Req>,
    A::Config: Clone,
    B: ServiceFactory<
        A::Response,
        Config = A::Config,
        Error = A::Error,
        InitError = A::InitError,
    >,
{
    type Response = B::Response;
    type Error = A::Error;
    type Config = A::Config;
    type Service = AndThenService<A::Service, B::Service, Req>;
    type InitError = A::InitError;
    type Future = AndThenServiceFactoryResponse<A, B, Req>;

    fn new_service(&self, cfg: A::Config) -> Self::Future {
        let inner = &*self.inner;
        AndThenServiceFactoryResponse::new(
            inner.0.new_service(cfg.clone()),
            inner.1.new_service(cfg),
        )
    }
}

impl<A, B, Req> Clone for AndThenServiceFactory<A, B, Req>
where
    A: ServiceFactory<Req>,
    A::Config: Clone,
    B: ServiceFactory<
        A::Response,
        Config = A::Config,
        Error = A::Error,
        InitError = A::InitError,
    >,
{
    fn clone(&self) -> Self {
        Self {
            inner: self.inner.clone(),
            _phantom: PhantomData,
        }
    }
}

pin_project! {
    pub(crate) struct AndThenServiceFactoryResponse<A, B, Req>
    where
        A: ServiceFactory<Req>,
        B: ServiceFactory<A::Response>,
    {
        #[pin]
        fut_a: A::Future,
        #[pin]
        fut_b: B::Future,
        a: Option<A::Service>,
        b: Option<B::Service>,
    }
}

impl<A, B, Req> AndThenServiceFactoryResponse<A, B, Req>
where
    A: ServiceFactory<Req>,
    B: ServiceFactory<A::Response>,
{
    fn new(fut_a: A::Future, fut_b: B::Future) -> Self {
        AndThenServiceFactoryResponse {
            fut_a,
            fut_b,
            a: None,
            b: None,
        }
    }
}

impl<A, B, Req> Future for AndThenServiceFactoryResponse<A, B, Req>
where
    A: ServiceFactory<Req>,
    B: ServiceFactory<A::Response, Error = A::Error, InitError = A::InitError>,
{
    type Output = Result<AndThenService<A::Service, B::Service, Req>, A::InitError>;

    fn poll(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Self::Output> {
        let this = self.project();

        if this.a.is_none() {
            if let Poll::Ready(service) = this.fut_a.poll(cx)? {
                *this.a = Some(service);
            }
        }
        if this.b.is_none() {
            if let Poll::Ready(service) = this.fut_b.poll(cx)? {
                *this.b = Some(service);
            }
        }

        if this.a.is_some() && this.b.is_some() {
            Poll::Ready(Ok(AndThenService::new(
                this.a.take().unwrap(),
                this.b.take().unwrap(),
            )))
        } else {
            Poll::Pending
        }
    }
}
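
// Illustrative usage sketch (not part of the original file): the factory-level
// counterpart built with `pipeline_factory` plus the crate's `fn_factory` and
// `fn_service` helpers. The closures and `u8` types are assumptions for this
// example; both inner factories are created from the same (cloned) config and
// the resulting services are chained by `AndThenService` as defined above.
//
//     use actix_service::{fn_factory, fn_service, pipeline_factory, Service, ServiceFactory};
//     use futures_util::future::{ok, ready};
//
//     #[actix_rt::test]
//     async fn and_then_factory_sketch() {
//         let factory = pipeline_factory(fn_factory(|| {
//             ready(Ok::<_, ()>(fn_service(|n: u8| ok::<_, ()>(n * 2))))
//         }))
//         .and_then(fn_factory(|| {
//             ready(Ok::<_, ()>(fn_service(|n: u8| ok::<_, ()>(n + 1))))
//         }));
//
//         let mut svc = factory.new_service(()).await.unwrap();
//         assert_eq!(svc.call(2).await, Ok(5));
//     }
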
#[cfg(test)]
mod tests {
    use alloc::rc::Rc;
    use core::{
        cell::Cell,
        task::{Context, Poll},
    };

    use futures_util::future::lazy;

    use super::*;
    use crate::{
        fn_factory, ok, pipeline, pipeline_factory, ready, Ready, Service, ServiceFactory,
    };

    struct Srv1(Rc<Cell<usize>>);

    impl Service<&'static str> for Srv1 {
        type Response = &'static str;
        type Error = ();
        type Future = Ready<Result<Self::Response, ()>>;

        fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
            self.0.set(self.0.get() + 1);
            Poll::Ready(Ok(()))
        }

        fn call(&mut self, req: &'static str) -> Self::Future {
            ok(req)
        }
    }

    #[derive(Clone)]
    struct Srv2(Rc<Cell<usize>>);

    impl Service<&'static str> for Srv2 {
        type Response = (&'static str, &'static str);
        type Error = ();
        type Future = Ready<Result<Self::Response, ()>>;

        fn poll_ready(&mut self, _: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
            self.0.set(self.0.get() + 1);
            Poll::Ready(Ok(()))
        }

        fn call(&mut self, req: &'static str) -> Self::Future {
            ok((req, "srv2"))
        }
    }

    #[actix_rt::test]
    async fn test_poll_ready() {
        let cnt = Rc::new(Cell::new(0));
        let mut srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt.clone()));
        let res = lazy(|cx| srv.poll_ready(cx)).await;
        assert_eq!(res, Poll::Ready(Ok(())));
        assert_eq!(cnt.get(), 2);
    }

    #[actix_rt::test]
    async fn test_call() {
        let cnt = Rc::new(Cell::new(0));
        let mut srv = pipeline(Srv1(cnt.clone())).and_then(Srv2(cnt));
        let res = srv.call("srv1").await;
        assert!(res.is_ok());
        assert_eq!(res.unwrap(), ("srv1", "srv2"));
    }

    #[actix_rt::test]
    async fn test_new_service() {
        let cnt = Rc::new(Cell::new(0));
        let cnt2 = cnt.clone();
        let new_srv =
            pipeline_factory(fn_factory(move || ready(Ok::<_, ()>(Srv1(cnt2.clone())))))
                .and_then(move || ready(Ok(Srv2(cnt.clone()))));

        let mut srv = new_srv.new_service(()).await.unwrap();
        let res = srv.call("srv1").await;
        assert!(res.is_ok());
        assert_eq!(res.unwrap(), ("srv1", "srv2"));
    }
}


@@ -1,186 +0,0 @@
use std::rc::Rc;
use futures::{Async, Future, Poll};
use crate::and_then::AndThen;
use crate::from_err::FromErr;
use crate::{NewService, Transform};
/// `AndThenTransform` new service combinator: chains new service `A` into new service `B` after wrapping `B`'s service with transform `T`
pub struct AndThenTransform<T, A, B> {
a: A,
b: B,
t: Rc<T>,
}
impl<T, A, B> AndThenTransform<T, A, B>
where
A: NewService,
B: NewService<Config = A::Config, InitError = A::InitError>,
T: Transform<B::Service, Request = A::Response, InitError = A::InitError>,
T::Error: From<A::Error>,
{
    /// Create new `AndThenTransform` new service instance
pub fn new(t: T, a: A, b: B) -> Self {
Self {
a,
b,
t: Rc::new(t),
}
}
}
impl<T, A, B> Clone for AndThenTransform<T, A, B>
where
A: Clone,
B: Clone,
{
fn clone(&self) -> Self {
Self {
a: self.a.clone(),
b: self.b.clone(),
t: self.t.clone(),
}
}
}
impl<T, A, B> NewService for AndThenTransform<T, A, B>
where
A: NewService,
B: NewService<Config = A::Config, InitError = A::InitError>,
T: Transform<B::Service, Request = A::Response, InitError = A::InitError>,
T::Error: From<A::Error>,
{
type Request = A::Request;
type Response = T::Response;
type Error = T::Error;
type Config = A::Config;
type InitError = T::InitError;
type Service = AndThen<FromErr<A::Service, T::Error>, T::Transform>;
type Future = AndThenTransformFuture<T, A, B>;
fn new_service(&self, cfg: &A::Config) -> Self::Future {
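        // Build services A and B from the same config; the returned future will
        // apply transform `t` to B's service and then chain `FromErr<A::Service>`
        // into the transformed service.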
AndThenTransformFuture {
a: None,
t: None,
t_cell: self.t.clone(),
fut_a: self.a.new_service(cfg),
fut_b: self.b.new_service(cfg),
fut_t: None,
}
}
}
pub struct AndThenTransformFuture<T, A, B>
where
A: NewService,
B: NewService<InitError = A::InitError>,
T: Transform<B::Service, Request = A::Response, InitError = A::InitError>,
T::Error: From<A::Error>,
{
fut_a: A::Future,
fut_b: B::Future,
fut_t: Option<T::Future>,
a: Option<A::Service>,
t: Option<T::Transform>,
t_cell: Rc<T>,
}
impl<T, A, B> Future for AndThenTransformFuture<T, A, B>
where
A: NewService,
B: NewService<InitError = A::InitError>,
T: Transform<B::Service, Request = A::Response, InitError = A::InitError>,
T::Error: From<A::Error>,
{
type Item = AndThen<FromErr<A::Service, T::Error>, T::Transform>;
type Error = T::InitError;
fn poll(&mut self) -> Poll<Self::Item, Self::Error> {
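        // Drive the three construction steps in any order: resolve B's service,
        // wrap it with the transform, and resolve A's service; only when both the
        // transformed B and A are ready is the combined `AndThen` service returned.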
if self.fut_t.is_none() {
if let Async::Ready(service) = self.fut_b.poll()? {
self.fut_t = Some(self.t_cell.new_transform(service));
}
}
if self.a.is_none() {
if let Async::Ready(service) = self.fut_a.poll()? {
self.a = Some(service);
}
}
if let Some(ref mut fut) = self.fut_t {
if let Async::Ready(transform) = fut.poll()? {
self.t = Some(transform);
}
}
if self.a.is_some() && self.t.is_some() {
Ok(Async::Ready(AndThen::new(
FromErr::new(self.a.take().unwrap()),
self.t.take().unwrap(),
)))
} else {
Ok(Async::NotReady)
}
}
}
#[cfg(test)]
mod tests {
use futures::future::{ok, FutureResult};
use futures::{Async, Future, Poll};
use crate::{IntoNewService, IntoService, NewService, Service, ServiceExt};
#[derive(Clone)]
struct Srv;
impl Service for Srv {
type Request = ();
type Response = ();
type Error = ();
type Future = FutureResult<(), ()>;
fn poll_ready(&mut self) -> Poll<(), Self::Error> {
Ok(Async::Ready(()))
}
fn call(&mut self, _: ()) -> Self::Future {
ok(())
}
}
#[test]
fn test_apply() {
let blank = |req| Ok(req);
let mut srv = blank
.into_service()
.apply_fn(Srv, |req: &'static str, srv: &mut Srv| {
srv.call(()).map(move |res| (req, res))
});
assert!(srv.poll_ready().is_ok());
let res = srv.call("srv").poll();
assert!(res.is_ok());
assert_eq!(res.unwrap(), Async::Ready(("srv", ())));
}
#[test]
fn test_new_service() {
let blank = || Ok::<_, ()>((|req| Ok(req)).into_service());
let new_srv = blank.into_new_service().apply(
|req: &'static str, srv: &mut Srv| srv.call(()).map(move |res| (req, res)),
|| Ok(Srv),
);
if let Async::Ready(mut srv) = new_srv.new_service(&()).poll().unwrap() {
assert!(srv.poll_ready().is_ok());
let res = srv.call("srv").poll();
assert!(res.is_ok());
assert_eq!(res.unwrap(), Async::Ready(("srv", ())));
} else {
panic!()
}
}
}

Some files were not shown because too many files have changed in this diff